| code | code_codestyle | style_context | style_context_codestyle | label |
| ---- | ---- | ---- | ---- | ---- |
| string (81 to 54k chars) | int64 (0 to 721) | string (91 to 41.9k chars) | int64 (0 to 699) | int64 (0 or 1) |
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
A__ : str= {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
A__ : str= {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase__ = (images / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase__ = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
UpperCamelCase__ = numpy_to_pil(SCREAMING_SNAKE_CASE )
return images
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
if images.ndim == 3:
UpperCamelCase__ = images[None, ...]
UpperCamelCase__ = (images * 2_55).round().astype('uint8' )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
UpperCamelCase__ = [Image.fromarray(image.squeeze() , mode='L' ) for image in images]
else:
UpperCamelCase__ = [Image.fromarray(SCREAMING_SNAKE_CASE ) for image in images]
return pil_images
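A minimal usage sketch of the two helpers above (the function names were restored from their call sites; the snippet assumes torch is installed and the batch lies in [-1, 1], as diffusion pipelines produce):

import torch

batch = torch.rand(2, 3, 64, 64) * 2 - 1  # fake batch of two RGB images in [-1, 1]
pils = pt_to_pil(batch)
print(len(pils), pils[0].size)  # -> 2 (64, 64)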
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
A__ : Any= TypeVar("""T""")
class __lowerCamelCase ( Generic[T] ):
def __init__( self , snake_case_ ) -> None:
UpperCamelCase__ = data
UpperCamelCase__ = self
UpperCamelCase__ = 0
class __lowerCamelCase ( Generic[T] ):
def __init__( self ) -> None:
# map from node name to the node object
UpperCamelCase__ = {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> None:
# create a new set with x as its member
UpperCamelCase__ = DisjointSetTreeNode(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> DisjointSetTreeNode[T]:
# find the set x belongs to (with path-compression)
UpperCamelCase__ = self.map[data]
if elem_ref != elem_ref.parent:
UpperCamelCase__ = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> None:
# helper function for union operation
if nodea.rank > nodea.rank:
UpperCamelCase__ = nodea
else:
UpperCamelCase__ = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> None:
# merge 2 disjoint sets
self.link(self.find_set(snake_case_ ) , self.find_set(snake_case_ ) )
class __lowerCamelCase ( Generic[T] ):
def __init__( self ) -> None:
# connections: map from the node to the neighbouring nodes (with weights)
UpperCamelCase__ = {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> None:
# add a node ONLY if its not present in the graph
if node not in self.connections:
UpperCamelCase__ = {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> None:
# add an edge with the given weight
self.add_node(snake_case_ )
self.add_node(snake_case_ )
UpperCamelCase__ = weight
UpperCamelCase__ = weight
def SCREAMING_SNAKE_CASE__ ( self ) -> GraphUndirectedWeighted[T]:
UpperCamelCase__ = []
UpperCamelCase__ = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda snake_case_ : x[2] )
# creating the disjoint set
UpperCamelCase__ = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(snake_case_ )
# MST generation
UpperCamelCase__ = 0
UpperCamelCase__ = 0
UpperCamelCase__ = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = edges[index]
index += 1
UpperCamelCase__ = disjoint_set.find_set(snake_case_ )
UpperCamelCase__ = disjoint_set.find_set(snake_case_ )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(snake_case_ , snake_case_ , snake_case_ )
disjoint_set.union(snake_case_ , snake_case_ )
return graph
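A short sketch exercising the Kruskal implementation above on a triangle graph; the heaviest edge is the only one excluded from the spanning tree:

g = GraphUndirectedWeighted[int]()
g.add_edge(1, 2, 1)
g.add_edge(2, 3, 2)
g.add_edge(1, 3, 10)
mst = g.kruskal()
print(mst.connections)  # {1: {2: 1}, 2: {1: 1, 3: 2}, 3: {2: 2}} -- the weight-10 edge is dropped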
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
A__ : Dict= logging.get_logger(__name__)
class __lowerCamelCase ( _a ):
a : List[str] =["""pixel_values"""]
def __init__( self , snake_case_ = True , snake_case_ = None , snake_case_ = PILImageResampling.BILINEAR , snake_case_ = True , snake_case_ = None , snake_case_ = True , snake_case_ = 1 / 255 , snake_case_ = True , snake_case_ = None , snake_case_ = None , **snake_case_ , ) -> None:
'''simple docstring'''
super().__init__(**snake_case_ )
UpperCamelCase__ = size if size is not None else {'shortest_edge': 256}
UpperCamelCase__ = get_size_dict(snake_case_ , default_to_square=snake_case_ )
UpperCamelCase__ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
UpperCamelCase__ = get_size_dict(snake_case_ , param_name='crop_size' )
UpperCamelCase__ = do_resize
UpperCamelCase__ = size
UpperCamelCase__ = resample
UpperCamelCase__ = do_center_crop
UpperCamelCase__ = crop_size
UpperCamelCase__ = do_rescale
UpperCamelCase__ = rescale_factor
UpperCamelCase__ = do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ = PILImageResampling.BICUBIC , snake_case_ = None , **snake_case_ , ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = get_size_dict(snake_case_ , default_to_square=snake_case_ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
UpperCamelCase__ = get_resize_output_image_size(snake_case_ , size=size['shortest_edge'] , default_to_square=snake_case_ )
return resize(snake_case_ , size=snake_case_ , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ = None , **snake_case_ , ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = get_size_dict(snake_case_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(snake_case_ , size=(size['height'], size['width']) , data_format=snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ = None , **snake_case_ ) -> np.ndarray:
'''simple docstring'''
return rescale(snake_case_ , scale=snake_case_ , data_format=snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = None , **snake_case_ , ) -> np.ndarray:
'''simple docstring'''
return normalize(snake_case_ , mean=snake_case_ , std=snake_case_ , data_format=snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = ChannelDimension.FIRST , **snake_case_ , ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ = size if size is not None else self.size
UpperCamelCase__ = get_size_dict(snake_case_ , default_to_square=snake_case_ )
UpperCamelCase__ = resample if resample is not None else self.resample
UpperCamelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase__ = crop_size if crop_size is not None else self.crop_size
UpperCamelCase__ = get_size_dict(snake_case_ , param_name='crop_size' )
UpperCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase__ = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else self.image_mean
UpperCamelCase__ = image_std if image_std is not None else self.image_std
UpperCamelCase__ = make_list_of_images(snake_case_ )
if not valid_images(snake_case_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
UpperCamelCase__ = [to_numpy_array(snake_case_ ) for image in images]
if do_resize:
UpperCamelCase__ = [self.resize(image=snake_case_ , size=snake_case_ , resample=snake_case_ ) for image in images]
if do_center_crop:
UpperCamelCase__ = [self.center_crop(image=snake_case_ , size=snake_case_ ) for image in images]
if do_rescale:
UpperCamelCase__ = [self.rescale(image=snake_case_ , scale=snake_case_ ) for image in images]
if do_normalize:
UpperCamelCase__ = [self.normalize(image=snake_case_ , mean=snake_case_ , std=snake_case_ ) for image in images]
UpperCamelCase__ = [to_channel_dimension_format(snake_case_ , snake_case_ ) for image in images]
UpperCamelCase__ = {'pixel_values': images}
return BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(snake_case_ ) != len(snake_case_ ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(snake_case_ ):
UpperCamelCase__ = target_sizes.numpy()
UpperCamelCase__ = []
for idx in range(len(snake_case_ ) ):
UpperCamelCase__ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=snake_case_ )
UpperCamelCase__ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(snake_case_ )
else:
UpperCamelCase__ = logits.argmax(dim=1 )
UpperCamelCase__ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
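A minimal usage sketch of the processor (runnable only inside the transformers package because of the relative imports above; the class name is inferred from the default sizes and post-processing, so treat it as an assumption):

from PIL import Image
import numpy as np

processor = MobileNetV2ImageProcessor()
img = Image.fromarray(np.zeros((300, 400, 3), dtype=np.uint8))
batch = processor(images=img, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224): shortest edge resized to 256, then 224x224 center crop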
"""simple docstring"""
A__ : Tuple= """Alexander Joslin"""
import operator as op
from .stack import Stack
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase__ = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
UpperCamelCase__ = Stack()
UpperCamelCase__ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(SCREAMING_SNAKE_CASE ) )
elif i in operators:
# RULE 2
operator_stack.push(SCREAMING_SNAKE_CASE )
elif i == ")":
# RULE 4
UpperCamelCase__ = operator_stack.peek()
operator_stack.pop()
UpperCamelCase__ = operand_stack.peek()
operand_stack.pop()
UpperCamelCase__ = operand_stack.peek()
operand_stack.pop()
UpperCamelCase__ = operators[opr](SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
operand_stack.push(SCREAMING_SNAKE_CASE )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
A__ : int= """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
A__ : str= (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
A__ : list[int]= [ord(letter) for letter in string.ascii_lowercase]
A__ : set[int]= {ord(char) for char in VALID_CHARS}
A__ : list[str]= ["the", "be", "to", "of", "and", "in", "that", "have"]
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str | None:
"""simple docstring"""
UpperCamelCase__ = ''
UpperCamelCase__ = 42
UpperCamelCase__ = 42
UpperCamelCase__ = 42
for keychar, cipherchar in zip(cycle(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(SCREAMING_SNAKE_CASE )
return decoded
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> list[str]:
"""simple docstring"""
UpperCamelCase__ = []
for key in product(SCREAMING_SNAKE_CASE , repeat=3 ):
UpperCamelCase__ = try_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if encoded is not None:
possibles.append(SCREAMING_SNAKE_CASE )
return possibles
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> list[str]:
"""simple docstring"""
return [possible for possible in possibles if common_word in possible.lower()]
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = "p059_cipher.txt" ) -> int:
"""simple docstring"""
UpperCamelCase__ = 42
UpperCamelCase__ = 42
UpperCamelCase__ = 42
UpperCamelCase__ = 42
UpperCamelCase__ = Path(SCREAMING_SNAKE_CASE ).parent.joinpath(SCREAMING_SNAKE_CASE ).read_text(encoding='utf-8' )
UpperCamelCase__ = [int(SCREAMING_SNAKE_CASE ) for number in data.strip().split(',' )]
UpperCamelCase__ = filter_valid_chars(SCREAMING_SNAKE_CASE )
for common_word in COMMON_WORDS:
UpperCamelCase__ = filter_common_word(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) == 1:
break
UpperCamelCase__ = possibles[0]
return sum(ord(SCREAMING_SNAKE_CASE ) for char in decoded_text )
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
A__ : Any= """src/diffusers"""
# Matches is_xxx_available()
A__ : Tuple= re.compile(r"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
A__ : Any= re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
A__ : Optional[Any]= """
{0} = None
"""
A__ : List[Any]= """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
A__ : Dict= """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ = _re_backend.findall(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) == 0:
return None
return "_and_".join(SCREAMING_SNAKE_CASE )
def lowerCAmelCase_( ) -> str:
"""simple docstring"""
with open(os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase__ = f.readlines()
# Get to the point we do the actual imports for type checking
UpperCamelCase__ = 0
UpperCamelCase__ = {}
# Go through the end of the file
while line_index < len(SCREAMING_SNAKE_CASE ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
UpperCamelCase__ = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('else:' ):
line_index += 1
line_index += 1
UpperCamelCase__ = []
# Until we unindent, add backend objects to the list
while line_index < len(SCREAMING_SNAKE_CASE ) and len(lines[line_index] ) > 1:
UpperCamelCase__ = lines[line_index]
UpperCamelCase__ = _re_single_line_import.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(SCREAMING_SNAKE_CASE ) > 0:
UpperCamelCase__ = objects
else:
line_index += 1
return backend_specific_objects
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
if name.isupper():
return DUMMY_CONSTANT.format(SCREAMING_SNAKE_CASE )
elif name.islower():
return DUMMY_FUNCTION.format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
return DUMMY_CLASS.format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=None ) -> int:
"""simple docstring"""
if backend_specific_objects is None:
UpperCamelCase__ = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
UpperCamelCase__ = {}
for backend, objects in backend_specific_objects.items():
UpperCamelCase__ = '[' + ', '.join(F'"{b}"' for b in backend.split('_and_' ) ) + ']'
UpperCamelCase__ = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for o in objects] )
UpperCamelCase__ = dummy_file
return dummy_files
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=False ) -> int:
"""simple docstring"""
UpperCamelCase__ = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
UpperCamelCase__ = {'torch': 'pt'}
# Locate actual dummy modules and read their content.
UpperCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE , 'utils' )
UpperCamelCase__ = {
backend: os.path.join(SCREAMING_SNAKE_CASE , F'dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py' )
for backend in dummy_files.keys()
}
UpperCamelCase__ = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(SCREAMING_SNAKE_CASE ):
with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase__ = f.read()
else:
UpperCamelCase__ = ''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'Updating diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py as the main '
'__init__ has new objects.' )
with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'The main __init__ has objects that are not present in '
F'diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py. Run `make fix-copies` '
'to fix this.' )
if __name__ == "__main__":
A__ : Any= argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
A__ : Optional[int]= parser.parse_args()
check_dummies(args.fix_and_overwrite)
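For illustration, create_dummy_object picks a template by the case of the name: ALL_CAPS becomes a constant, lowercase a function, and anything else a class. A quick self-check (the object name is just an example):

print(create_dummy_object("UNet2DModel", '["torch"]'))
# -> a DummyObject class whose __init__, from_config and from_pretrained
#    all call requires_backends(..., ["torch"])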
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
A__ : Optional[Any]= """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=None ) -> Dict:
"""simple docstring"""
if subparsers is not None:
UpperCamelCase__ = subparsers.add_parser('tpu-config' , description=_description )
else:
UpperCamelCase__ = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
UpperCamelCase__ = parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
'--config_file' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=SCREAMING_SNAKE_CASE , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=SCREAMING_SNAKE_CASE , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
UpperCamelCase__ = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=SCREAMING_SNAKE_CASE , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
parser.set_defaults(func=SCREAMING_SNAKE_CASE )
return parser
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase__ = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCamelCase__ = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCamelCase__ = defaults.commands
if not args.tpu_name:
UpperCamelCase__ = defaults.tpu_name
if not args.tpu_zone:
UpperCamelCase__ = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCamelCase__ = 'git+https://github.com/huggingface/accelerate.git'
elif args.accelerate_version == "latest":
UpperCamelCase__ = 'accelerate -U'
elif isinstance(parse(args.accelerate_version ) , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = F'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError('You must specify either a command file or a command to run on the pod.' )
if args.command_file:
with open(args.command_file , 'r' ) as f:
UpperCamelCase__ = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCamelCase__ = ['cd /usr/share']
if args.install_accelerate:
new_cmd += [F'pip install {args.accelerate_version}']
new_cmd += args.command
UpperCamelCase__ = '; '.join(SCREAMING_SNAKE_CASE )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCamelCase__ = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F'Running {" ".join(SCREAMING_SNAKE_CASE )}' )
return
subprocess.run(SCREAMING_SNAKE_CASE )
print('Successfully setup pod.' )
def lowerCAmelCase_( ) -> int:
"""simple docstring"""
UpperCamelCase__ = tpu_command_parser()
UpperCamelCase__ = parser.parse_args()
tpu_command_launcher(SCREAMING_SNAKE_CASE )
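A minimal sketch driving the parser programmatically; the TPU name and zone are placeholder values, and --debug makes the launcher print the assembled gcloud command instead of running it:

parser = tpu_command_parser()
args = parser.parse_args(
    ["--tpu_name", "my-tpu", "--tpu_zone", "us-central1-a", "--command", "echo hello", "--debug"]
)
tpu_command_launcher(args)
# Running gcloud compute tpus tpu-vm ssh my-tpu --zone us-central1-a --command cd /usr/share; echo hello --worker all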
"""simple docstring"""
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __lowerCamelCase ( _a ):
a : Tuple ="""dandelin/vilt-b32-finetuned-vqa"""
a : str =(
"""This is a tool that answers a question about an image. It takes an input named `image` which should be the """
"""image containing the information, as well as a `question` which should be the question in English. It """
"""returns a text that is the answer to the question."""
)
a : Optional[Any] ="""image_qa"""
a : Dict =AutoProcessor
a : int =AutoModelForVisualQuestionAnswering
a : str =["""image""", """text"""]
a : List[Any] =["""text"""]
def __init__( self , *snake_case_ , **snake_case_ ) -> Optional[Any]:
requires_backends(self , ['vision'] )
super().__init__(*snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> Any:
return self.pre_processor(snake_case_ , snake_case_ , return_tensors='pt' )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Dict:
with torch.no_grad():
return self.model(**snake_case_ ).logits
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[Any]:
UpperCamelCase__ = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx] | 709 |
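A minimal usage sketch (calling the tool runs encode, forward and decode in turn; the ViLT checkpoint is downloaded on first use, and the image path is a placeholder):

from PIL import Image

tool = ImageQuestionAnsweringTool()
image = Image.open("photo.jpg")  # placeholder path
print(tool(image, "How many people are in the photo?"))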
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : List[str]= logging.get_logger(__name__)
class __lowerCamelCase ( _a ):
a : Optional[int] ="""timm_backbone"""
def __init__( self , snake_case_=None , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Dict:
super().__init__(**snake_case_ )
UpperCamelCase__ = backbone
UpperCamelCase__ = num_channels
UpperCamelCase__ = features_only
UpperCamelCase__ = use_pretrained_backbone
UpperCamelCase__ = True
UpperCamelCase__ = out_indices if out_indices is not None else (-1,)
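A minimal sketch instantiating the config; the backbone id can be any valid timm model name (the value shown is an example, not part of the original):

config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
print(config.model_type, config.out_indices)  # timm_backbone (1, 2, 3, 4)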
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> "list[int]":
"""simple docstring"""
if upper_limit < 0:
raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
UpperCamelCase__ = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
UpperCamelCase__ = 1
if upper_limit > 0:
UpperCamelCase__ = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(SCREAMING_SNAKE_CASE ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
A__ : List[str]= int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
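The recurrence can be cross-checked against the closed form C(n) = C(2n, n) / (n + 1); a quick sanity check using the standard library:

from math import comb

assert catalan_numbers(10) == [comb(2 * n, n) // (n + 1) for n in range(11)]
print(catalan_numbers(10))  # [1, 1, 2, 5, 14, 42, 132, 429, 1430, 4862, 16796]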
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
A__ : Any= logging.get_logger(__name__)
A__ : str= {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class __lowerCamelCase ( _a ):
a : List[str] ="""layoutlmv3"""
def __init__( self , snake_case_=5_0265 , snake_case_=768 , snake_case_=12 , snake_case_=12 , snake_case_=3072 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=2 , snake_case_=0.02 , snake_case_=1E-5 , snake_case_=1 , snake_case_=0 , snake_case_=2 , snake_case_=1024 , snake_case_=128 , snake_case_=128 , snake_case_=True , snake_case_=32 , snake_case_=128 , snake_case_=64 , snake_case_=256 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=224 , snake_case_=3 , snake_case_=16 , snake_case_=None , **snake_case_ , ) -> Union[str, Any]:
super().__init__(
vocab_size=snake_case_ , hidden_size=snake_case_ , num_hidden_layers=snake_case_ , num_attention_heads=snake_case_ , intermediate_size=snake_case_ , hidden_act=snake_case_ , hidden_dropout_prob=snake_case_ , attention_probs_dropout_prob=snake_case_ , max_position_embeddings=snake_case_ , type_vocab_size=snake_case_ , initializer_range=snake_case_ , layer_norm_eps=snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ , )
UpperCamelCase__ = max_ad_position_embeddings
UpperCamelCase__ = coordinate_size
UpperCamelCase__ = shape_size
UpperCamelCase__ = has_relative_attention_bias
UpperCamelCase__ = rel_pos_bins
UpperCamelCase__ = max_rel_pos
UpperCamelCase__ = has_spatial_attention_bias
UpperCamelCase__ = rel_ad_pos_bins
UpperCamelCase__ = max_rel_ad_pos
UpperCamelCase__ = text_embed
UpperCamelCase__ = visual_embed
UpperCamelCase__ = input_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = patch_size
UpperCamelCase__ = classifier_dropout
class __lowerCamelCase ( _a ):
a : Tuple =version.parse("""1.12""" )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> float:
return 1E-5
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return 12
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , snake_case_ = 3 , snake_case_ = 40 , snake_case_ = 40 , ) -> Mapping[str, Any]:
setattr(processor.image_processor , 'apply_ocr' , snake_case_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCamelCase__ = compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCamelCase__ = processor.tokenizer.num_special_tokens_to_add(snake_case_ )
UpperCamelCase__ = compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case_ )
# Generate dummy inputs according to compute batch and sequence
UpperCamelCase__ = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
UpperCamelCase__ = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
UpperCamelCase__ = self._generate_dummy_images(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
UpperCamelCase__ = dict(
processor(
snake_case_ , text=snake_case_ , boxes=snake_case_ , return_tensors=snake_case_ , ) )
return inputs
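A minimal sketch (inside the transformers package) showing the restored default hyper-parameters:

config = LayoutLMv3Config()
print(config.model_type, config.hidden_size, config.max_2d_position_embeddings)  # layoutlmv3 768 1024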
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
A__= get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
class __lowerCamelCase ( _a , unittest.TestCase ):
a : str =BartphoTokenizer
a : Union[str, Any] =False
a : str =True
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
super().setUp()
UpperCamelCase__ = ['▁This', '▁is', '▁a', '▁t', 'est']
UpperCamelCase__ = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
UpperCamelCase__ = {'unk_token': '<unk>'}
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['monolingual_vocab_file'] )
with open(self.monolingual_vocab_file , 'w' , encoding='utf-8' ) as fp:
for token in vocab_tokens:
fp.write(F'{token} {vocab_tokens[token]}\n' )
UpperCamelCase__ = BartphoTokenizer(snake_case_ , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self , **snake_case_ ) -> int:
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Dict:
UpperCamelCase__ = 'This is a là test'
UpperCamelCase__ = 'This is a<unk><unk> test'
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
UpperCamelCase__ = BartphoTokenizer(snake_case_ , self.monolingual_vocab_file , **self.special_tokens_map )
UpperCamelCase__ = 'This is a là test'
UpperCamelCase__ = '▁This ▁is ▁a ▁l à ▁t est'.split()
UpperCamelCase__ = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase__ = tokens + [tokenizer.unk_token]
UpperCamelCase__ = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , snake_case_ )
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = True
UpperCamelCase__ = True
if hasattr(snake_case_ , 'use_cache' ):
UpperCamelCase__ = True
UpperCamelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
for model_class in self.all_model_classes:
UpperCamelCase__ = self._prepare_for_class(snake_case_ , snake_case_ )
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = len(model(snake_case_ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ , saved_model=snake_case_ )
UpperCamelCase__ = os.path.join(snake_case_ , 'saved_model' , '1' )
UpperCamelCase__ = tf.keras.models.load_model(snake_case_ )
UpperCamelCase__ = model(snake_case_ )
if self.is_encoder_decoder:
UpperCamelCase__ = outputs['encoder_hidden_states']
UpperCamelCase__ = outputs['encoder_attentions']
else:
UpperCamelCase__ = outputs['hidden_states']
UpperCamelCase__ = outputs['attentions']
self.assertEqual(len(snake_case_ ) , snake_case_ )
UpperCamelCase__ = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(snake_case_ ) , snake_case_ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = True
UpperCamelCase__ = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
def check_decoder_attentions_output(snake_case_ ):
UpperCamelCase__ = len(snake_case_ )
self.assertEqual(out_len % 2 , 0 )
UpperCamelCase__ = outputs.decoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(snake_case_ ):
UpperCamelCase__ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
UpperCamelCase__ = True
UpperCamelCase__ = False
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
UpperCamelCase__ = len(snake_case_ )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
if self.is_encoder_decoder:
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_decoder_attentions_output(snake_case_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCamelCase__ = True
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
# Check attention is always last and order is fine
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(snake_case_ ) )
self.assertEqual(model.config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
UpperCamelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCamelCase__ = model(snake_case_ )[0]
UpperCamelCase__ = [1, 6, 768]
self.assertEqual(output.shape , snake_case_ )
UpperCamelCase__ = tf.constant(
    [
        [
            [-0.03475493, -0.4686034, -0.30638832],
            [0.22637248, -0.26988646, -0.7423424],
            [0.10324868, -0.45013508, -0.58280784],
        ]
    ] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1E-4 )
| 20 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester( unittest.TestCase ):
def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ) -> Optional[int]:
    size = size if size is not None else {'height': 18, 'width': 18}
    self.parent = parent
    self.batch_size = batch_size
    self.num_channels = num_channels
    self.image_size = image_size
    self.min_resolution = min_resolution
    self.max_resolution = max_resolution
    self.do_resize = do_resize
    self.size = size
    self.do_normalize = do_normalize
    self.image_mean = image_mean
    self.image_std = image_std
def prepare_image_processor_dict( self ) -> Optional[Any]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class DPTImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = DPTImageProcessor if is_vision_available() else None
def setUp( self ) -> Tuple:
    self.image_processor_tester = DPTImageProcessingTester(self )
@property
def image_processor_dict( self ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties( self ) -> Any:
    image_processing = self.image_processing_class(**self.image_processor_dict )
    self.assertTrue(hasattr(image_processing , 'image_mean' ) )
    self.assertTrue(hasattr(image_processing , 'image_std' ) )
    self.assertTrue(hasattr(image_processing , 'do_normalize' ) )
    self.assertTrue(hasattr(image_processing , 'do_resize' ) )
    self.assertTrue(hasattr(image_processing , 'size' ) )
def test_image_processor_from_dict_with_kwargs( self ) -> List[Any]:
    image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
    self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
    image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
    self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def test_call_pil( self ) -> int:
    # Initialize image_processing
    image_processing = self.image_processing_class(**self.image_processor_dict )
    # create random PIL images
    image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
    for image in image_inputs:
        self.assertIsInstance(image , Image.Image )
    # Test not batched input
    encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
    self.assertEqual(
        encoded_images.shape , (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.size['height'],
            self.image_processor_tester.size['width'],
        ) , )
    # Test batched
    encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
    self.assertEqual(
        encoded_images.shape , (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.size['height'],
            self.image_processor_tester.size['width'],
        ) , )
def test_call_numpy( self ) -> str:
    # Initialize image_processing
    image_processing = self.image_processing_class(**self.image_processor_dict )
    # create random numpy tensors
    image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
    for image in image_inputs:
        self.assertIsInstance(image , np.ndarray )
    # Test not batched input
    encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
    self.assertEqual(
        encoded_images.shape , (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.size['height'],
            self.image_processor_tester.size['width'],
        ) , )
    # Test batched
    encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
    self.assertEqual(
        encoded_images.shape , (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.size['height'],
            self.image_processor_tester.size['width'],
        ) , )
def test_call_pytorch( self ) -> Optional[Any]:
    # Initialize image_processing
    image_processing = self.image_processing_class(**self.image_processor_dict )
    # create random PyTorch tensors
    image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
    for image in image_inputs:
        self.assertIsInstance(image , torch.Tensor )
    # Test not batched input
    encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
    self.assertEqual(
        encoded_images.shape , (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.size['height'],
            self.image_processor_tester.size['width'],
        ) , )
    # Test batched
    encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
    self.assertEqual(
        encoded_images.shape , (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.size['height'],
            self.image_processor_tester.size['width'],
        ) , )
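if __name__ == "__main__":
    # Hedged usage sketch (added, not part of the original test file): run the
    # processor once outside unittest to see the resize + normalize output shape,
    # mirroring what the batched/unbatched assertions above check.
    if is_vision_available() and is_torch_available():
        _processor = DPTImageProcessor(size={"height": 18, "width": 18})
        _img = Image.fromarray((np.random.rand(30, 30, 3) * 255).astype(np.uint8))
        print(_processor(images=_img, return_tensors="pt").pixel_values.shape)  # torch.Size([1, 3, 18, 18])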
| 712 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution( t_limit = 1000000 , n_limit = 10 ) -> int:
    """simple docstring"""
    count = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(F"""{solution() = }""")
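# Added sanity check (not part of the original solution): a square lamina of outer
# width o with a centred square hole of width h (same parity, h <= o - 2) uses
# o*o - h*h tiles, which is exactly what the counting loop above accumulates.
def _laminae_tile_count(o, h):
    return o * o - h * h

assert _laminae_tile_count(3, 1) == 8  # the smallest lamina uses 8 tiles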
| 20 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""tokenization_roformer_fast"""] = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_roformer"""] = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_tf_roformer"""] = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_flax_roformer"""] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
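# Illustrative sketch (added; a simplified stand-in, not the real transformers
# _LazyModule): the mapping above lets names like RoFormerModel resolve lazily,
# importing the heavy framework-specific submodule only on first attribute access.
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [attr, ...]} into {attr: submodule}
        self._attr_to_submodule = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(attr)
        module = importlib.import_module(f".{self._attr_to_submodule[attr]}", self.__name__)
        return getattr(module, attr)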
| 713 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester( unittest.TestCase ):
def __init__( self , parent , vocab_size=100 , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , ) -> Optional[int]:
    self.parent = parent
    self.vocab_size = vocab_size
    self.batch_size = batch_size
    self.image_size = image_size
    self.patch_size = patch_size
    self.num_channels = num_channels
    self.is_training = is_training
    self.use_labels = use_labels
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.type_sequence_label_size = type_sequence_label_size
    self.initializer_range = initializer_range
    # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
    num_patches = (image_size // patch_size) ** 2
    self.seq_length = num_patches + 1
def prepare_config_and_inputs( self ) -> str:
    pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
    labels = None
    if self.use_labels:
        labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
    config = BeitConfig(
        vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
    return config, pixel_values, labels
def create_and_check_model( self , config , pixel_values , labels ) -> List[Any]:
    model = FlaxBeitModel(config=config )
    result = model(pixel_values )
    self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_for_masked_lm( self , config , pixel_values , labels ) -> List[Any]:
    model = FlaxBeitForMaskedImageModeling(config=config )
    result = model(pixel_values )
    self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def create_and_check_for_image_classification( self , config , pixel_values , labels ) -> List[str]:
    config.num_labels = self.type_sequence_label_size
    model = FlaxBeitForImageClassification(config=config )
    result = model(pixel_values )
    self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    # test greyscale images
    config.num_channels = 1
    model = FlaxBeitForImageClassification(config )
    pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
    result = model(pixel_values )
def prepare_config_and_inputs_for_common( self ) -> Dict:
    config_and_inputs = self.prepare_config_and_inputs()
    config, pixel_values, labels = config_and_inputs
    inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class FlaxBeitModelTest( FlaxModelTesterMixin , unittest.TestCase ):
a : int =(
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def setUp( self ) -> None:
    self.model_tester = FlaxBeitModelTester(self )
    self.config_tester = ConfigTester(self , config_class=BeitConfig , has_text_modality=False , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase__ = self._prepare_for_class(snake_case_ , snake_case_ )
UpperCamelCase__ = model_class(snake_case_ )
@jax.jit
def model_jitted(snake_case_ , **snake_case_ ):
return model(pixel_values=snake_case_ , **snake_case_ )
with self.subTest('JIT Enabled' ):
UpperCamelCase__ = model_jitted(**snake_case_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
UpperCamelCase__ = model_jitted(**snake_case_ ).to_tuple()
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
for jitted_output, output in zip(snake_case_ , snake_case_ ):
self.assertEqual(jitted_output.shape , output.shape )
def test_model( self ) -> Dict:
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_model(*config_and_inputs )
def test_for_masked_lm( self ) -> Any:
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
def test_for_image_classification( self ) -> Optional[int]:
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def test_model_from_pretrained( self ) -> List[Any]:
    for model_class_name in self.all_model_classes:
        model = model_class_name.from_pretrained('microsoft/beit-base-patch16-224' )
        outputs = model(np.ones((1, 3, 224, 224) ) )
        self.assertIsNotNone(outputs )
def prepare_img( ) -> Optional[int]:
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_vision
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
@cached_property
def default_image_processor( self ) -> List[Any]:
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def test_inference_masked_image_modeling_head( self ) -> str:
    model = FlaxBeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' )
    image_processor = self.default_image_processor
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors='np' ).pixel_values
    # prepare bool_masked_pos
    bool_masked_pos = np.ones((1, 196) , dtype=bool )
    # forward pass
    outputs = model(pixel_values=pixel_values , bool_masked_pos=bool_masked_pos )
    logits = outputs.logits
    # verify the logits
    expected_shape = (1, 196, 8192)
    self.assertEqual(logits.shape , expected_shape )
    expected_slice = np.array(
        [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] )
    self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , expected_slice , atol=1E-2 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' )
# forward pass
UpperCamelCase__ = model(**snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 1000)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array([-1.2385, -1.0987, -1.0108] )
self.assertTrue(np.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ) )
UpperCamelCase__ = 281
self.assertEqual(logits.argmax(-1 ).item() , snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=snake_case_ , return_tensors='np' )
# forward pass
UpperCamelCase__ = model(**snake_case_ )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 21841)
self.assertEqual(logits.shape , snake_case_ )
UpperCamelCase__ = np.array([1.6881, -0.2787, 0.5901] )
self.assertTrue(np.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ) )
UpperCamelCase__ = 2396
self.assertEqual(logits.argmax(-1 ).item() , snake_case_ )
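# Added check: BEiT's sequence length is the patch count plus one [CLS] token, so
# the 224-px checkpoints above see (224 // 16) ** 2 = 196 patches, and the all-ones
# bool_masked_pos of shape (1, 196) in the masked-modeling test covers every patch.
assert (224 // 16) ** 2 == 196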
| 20 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
A__ : Tuple= logging.get_logger(__name__) # pylint: disable=invalid-name
A__ : List[str]= """
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"A red cartoon frog, 4k\"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16
... )
>>> pipe.to(\"cuda\")
>>> init_image = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/frog.png\"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save(\"red_frog.png\")
```
"""
def downscale_height_and_width( height , width , scale_factor=8 ) -> Optional[int]:
    """simple docstring"""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
def prepare_image( pil_image , w=512 , h=512 ) -> Tuple:
    """simple docstring"""
    pil_image = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
    arr = np.array(pil_image.convert('RGB' ) )
    arr = arr.astype(np.float32 ) / 127.5 - 1
    arr = np.transpose(arr , [2, 0, 1] )
    image = torch.from_numpy(arr ).unsqueeze(0 )
    return image
class KandinskyV22Img2ImgPipeline( DiffusionPipeline ):
def __init__( self , unet , scheduler , movq , ) -> Union[str, Any]:
    super().__init__()
    self.register_modules(
        unet=unet , scheduler=scheduler , movq=movq , )
    self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def get_timesteps( self , num_inference_steps , strength , device ) -> Union[str, Any]:
    # get the original timestep using init_timestep
    init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
    t_start = max(num_inference_steps - init_timestep , 0 )
    timesteps = self.scheduler.timesteps[t_start:]
    return timesteps, num_inference_steps - t_start
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ) -> str:
if not isinstance(snake_case_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(snake_case_ )}' )
UpperCamelCase__ = image.to(device=snake_case_ , dtype=snake_case_ )
UpperCamelCase__ = batch_size * num_images_per_prompt
if image.shape[1] == 4:
UpperCamelCase__ = image
else:
if isinstance(snake_case_ , snake_case_ ) and len(snake_case_ ) != batch_size:
raise ValueError(
F'You have passed a list of generators of length {len(snake_case_ )}, but requested an effective batch'
F' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
elif isinstance(snake_case_ , snake_case_ ):
UpperCamelCase__ = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(snake_case_ )
]
UpperCamelCase__ = torch.cat(snake_case_ , dim=0 )
else:
UpperCamelCase__ = self.movq.encode(snake_case_ ).latent_dist.sample(snake_case_ )
UpperCamelCase__ = self.movq.config.scaling_factor * init_latents
UpperCamelCase__ = torch.cat([init_latents] , dim=0 )
UpperCamelCase__ = init_latents.shape
UpperCamelCase__ = randn_tensor(snake_case_ , generator=snake_case_ , device=snake_case_ , dtype=snake_case_ )
# get latents
UpperCamelCase__ = self.scheduler.add_noise(snake_case_ , snake_case_ , snake_case_ )
UpperCamelCase__ = init_latents
return latents
def SCREAMING_SNAKE_CASE__ ( self , snake_case_=0 ) -> Tuple:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCamelCase__ = torch.device(F'cuda:{gpu_id}' )
models = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(snake_case_ , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_=0 ) -> List[str]:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
device = torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
    self.to('cpu' , silence_dtype_warnings=True )
    torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
hook = None
for cpu_offloaded_model in [self.unet, self.movq]:
    _ , hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
# We'll offload the last model manually.
self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(snake_case_ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(snake_case_ )
def __call__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = 512 , snake_case_ = 512 , snake_case_ = 100 , snake_case_ = 4.0 , snake_case_ = 0.3 , snake_case_ = 1 , snake_case_ = None , snake_case_ = "pil" , snake_case_ = True , ) -> List[str]:
UpperCamelCase__ = self._execution_device
UpperCamelCase__ = guidance_scale > 1.0
if isinstance(snake_case_ , snake_case_ ):
UpperCamelCase__ = torch.cat(snake_case_ , dim=0 )
UpperCamelCase__ = image_embeds.shape[0]
if isinstance(snake_case_ , snake_case_ ):
UpperCamelCase__ = torch.cat(snake_case_ , dim=0 )
if do_classifier_free_guidance:
UpperCamelCase__ = image_embeds.repeat_interleave(snake_case_ , dim=0 )
UpperCamelCase__ = negative_image_embeds.repeat_interleave(snake_case_ , dim=0 )
UpperCamelCase__ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=snake_case_ )
if not isinstance(snake_case_ , snake_case_ ):
UpperCamelCase__ = [image]
if not all(isinstance(snake_case_ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F'Input is in incorrect format: {[type(snake_case_ ) for i in image]}. Currently, we only support PIL image and pytorch tensor' )
UpperCamelCase__ = torch.cat([prepare_image(snake_case_ , snake_case_ , snake_case_ ) for i in image] , dim=0 )
UpperCamelCase__ = image.to(dtype=image_embeds.dtype , device=snake_case_ )
UpperCamelCase__ = self.movq.encode(snake_case_ )['latents']
UpperCamelCase__ = latents.repeat_interleave(snake_case_ , dim=0 )
self.scheduler.set_timesteps(snake_case_ , device=snake_case_ )
UpperCamelCase__ , UpperCamelCase__ = self.get_timesteps(snake_case_ , snake_case_ , snake_case_ )
UpperCamelCase__ = timesteps[:1].repeat(batch_size * num_images_per_prompt )
UpperCamelCase__ , UpperCamelCase__ = downscale_height_and_width(snake_case_ , snake_case_ , self.movq_scale_factor )
UpperCamelCase__ = self.prepare_latents(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , image_embeds.dtype , snake_case_ , snake_case_ )
for i, t in enumerate(self.progress_bar(snake_case_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase__ = {'image_embeds': image_embeds}
UpperCamelCase__ = self.unet(
sample=snake_case_ , timestep=snake_case_ , encoder_hidden_states=snake_case_ , added_cond_kwargs=snake_case_ , return_dict=snake_case_ , )[0]
if do_classifier_free_guidance:
UpperCamelCase__ , UpperCamelCase__ = noise_pred.split(latents.shape[1] , dim=1 )
UpperCamelCase__ , UpperCamelCase__ = noise_pred.chunk(2 )
UpperCamelCase__ , UpperCamelCase__ = variance_pred.chunk(2 )
UpperCamelCase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCamelCase__ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCamelCase__ , UpperCamelCase__ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase__ = self.scheduler.step(
snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ , )[0]
# post-processing
UpperCamelCase__ = self.movq.decode(snake_case_ , force_not_quantize=snake_case_ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
UpperCamelCase__ = image * 0.5 + 0.5
UpperCamelCase__ = image.clamp(0 , 1 )
UpperCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCamelCase__ = self.numpy_to_pil(snake_case_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case_ )
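# Worked example (added): downscale_height_and_width divides each side by
# scale_factor**2 (rounding up) and multiplies back by scale_factor, so the
# 768x768 request from the docstring is generated from 96x96 latents.
assert downscale_height_and_width(768, 768) == (96, 96)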
| 714 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__( self ) -> Tuple:
        self.node_position = []
    def get_position( self , vertex ) -> List[str]:
        return self.node_position[vertex]
    def set_position( self , vertex , pos ) -> Optional[int]:
        self.node_position[vertex] = pos
    def top_to_bottom( self , heap , start , size , positions ) -> List[Any]:
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp , tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child] , positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start] , positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] , self.get_position(positions[start] ) )
                self.set_position(positions[start] , temp )
                self.top_to_bottom(heap , smallest_child , size , positions )
    def bottom_to_top( self , val , index , heap , position ) -> Any:
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp , 0 )
    def heapify( self , heap , positions ) -> Any:
        start = len(heap ) // 2 - 1
        for i in range(start , -1 , -1 ):
            self.top_to_bottom(heap , i , len(heap ) , positions )
    def delete_minimum( self , heap , positions ) -> List[Any]:
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap ) , positions )
        return temp
def prisms_algorithm( adjacency_list ) -> list:
    """simple docstring"""
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list ) # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = [] # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
edges_number = int(input("""Enter number of edges: """).strip())
adjacency_list = defaultdict(list)
for _ in range(edges_number):
    edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
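# Non-interactive demo (added, not in the original file): a weighted triangle with
# edges 0-1 (w=1), 0-2 (w=2), 1-2 (w=3); the minimum spanning tree keeps the two
# cheapest edges, both attached to vertex 0.
_demo_graph = defaultdict(list)
for _u, _v, _w in [(0, 1, 1), (0, 2, 2), (1, 2, 3)]:
    _demo_graph[_u].append([_v, _w])
    _demo_graph[_v].append([_u, _w])
print(prisms_algorithm(_demo_graph))  # expected: [(0, 1), (0, 2)]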
| 20 | 0 |
"""simple docstring"""
from PIL import Image
def mean_threshold( image ) -> Image:
    """simple docstring"""
    # Note: PIL's Image.size is (width, height); the original names are swapped
    # here but are used consistently with the pixels[x, y] accesses below.
    height , width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width ):
        for j in range(height ):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    for j in range(width ):
        for i in range(height ):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
image = mean_threshold(Image.open("""path_to_image""").convert("""L"""))
image.save("""output_image_path""")
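# Added demo: a half-black, half-white 4x1 grayscale strip has mean 127, so the
# thresholded result is exactly [0, 0, 255, 255].
_strip = Image.new("L", (4, 1))
_strip.putdata([0, 0, 255, 255])
assert list(mean_threshold(_strip).getdata()) == [0, 0, 255, 255]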
| 715 |
"""simple docstring"""
from copy import deepcopy
class FenwickTree:
    def __init__( self , arr = None , size = None ) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr )
        else:
            raise ValueError('Either arr or size must be specified' )
    def init( self , arr ) -> None:
        self.size = len(arr )
        self.tree = deepcopy(arr )
        for i in range(1 , self.size ):
            j = self.next_(i )
            if j < self.size:
                self.tree[j] += self.tree[i]
    def get_array( self ) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1 , 0 , -1 ):
            j = self.next_(i )
            if j < self.size:
                arr[j] -= arr[i]
        return arr
    @staticmethod
    def next_( index ) -> int:
        return index + (index & (-index))
    @staticmethod
    def prev( index ) -> int:
        return index - (index & (-index))
    def add( self , index , value ) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index )
    def update( self , index , value ) -> None:
        self.add(index , value - self.get(index ) )
    def prefix( self , right ) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1 # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right )
        return result
    def query( self , left , right ) -> int:
        return self.prefix(right ) - self.prefix(left )
    def get( self , index ) -> int:
        return self.query(index , index + 1 )
    def rank_query( self , value ) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1 # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
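# Usage sketch (added; the FenwickTree name is restored from the upstream
# algorithms collection): point updates and prefix/range sums in O(log n).
_ft = FenwickTree(arr=[1, 2, 3, 4, 5])
assert _ft.prefix(3) == 6        # arr[0] + arr[1] + arr[2]
_ft.add(0, 9)                    # arr[0] becomes 10
assert _ft.query(0, 2) == 12     # arr[0] + arr[1]
assert _ft.get_array() == [10, 2, 3, 4, 5]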
| 20 | 0 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
query = """%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """)))
print("""Googling.....""")
url = F"""https://www.google.com/search?q={query}&num=100"""
res = requests.get(
url,
headers={"""User-Agent""": str(UserAgent().random)},
)
try:
link = (
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """yuRUbf"""})
.find("""a""")
.get("""href""")
)
except AttributeError:
link = parse_qs(
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """kCrYT"""})
.find("""a""")
.get("""href""")
)["""url"""][0]
webbrowser.open(link)
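# Added illustration: the AttributeError fallback parses Google's redirect-style
# hrefs ("/url?url=<target>&sa=..."), which parse_qs percent-decodes back:
assert parse_qs("url=https%3A%2F%2Fexample.com&sa=U")["url"][0] == "https://example.com"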
| 716 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
A__ : Union[str, Any]= logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class __lowerCamelCase ( unittest.TestCase ):
def analyze_directory( self , directory , identifier = None , n_identifier = None , ignore_files = None , only_modules = True , ) -> Tuple:
    files = [file for file in os.listdir(directory ) if os.path.isfile(os.path.join(directory , file ) )]
    if identifier is not None:
        files = [file for file in files if identifier in file]
    if n_identifier is not None:
        if isinstance(n_identifier , list ):
            for n_ in n_identifier:
                files = [file for file in files if n_ not in file]
        else:
            files = [file for file in files if n_identifier not in file]
    ignore_files = ignore_files or []
    ignore_files.append('__init__.py' )
    files = [file for file in files if file not in ignore_files]
    for file in files:
        # Open all files
        print('Testing' , file )
        if only_modules:
            module_identifier = file.split('.' )[0]
            try:
                module_identifier = getattr(transformers , module_identifier )
                suite = doctest.DocTestSuite(module_identifier )
                result = unittest.TextTestRunner().run(suite )
                self.assertIs(len(result.failures ) , 0 )
            except AttributeError:
                logger.info(F'{module_identifier} is not a module.' )
        else:
            result = doctest.testfile(str('..' / directory / file ) , optionflags=doctest.ELLIPSIS )
            self.assertIs(result.failed , 0 )
def test_modeling( self ) -> Union[str, Any]:
    directory = Path('src/transformers' )
    identifier = 'modeling'
    ignore_files = [
        'modeling_ctrl.py',
        'modeling_tf_ctrl.py',
    ]
    self.analyze_directory(directory , identifier=identifier , ignore_files=ignore_files )
def test_tokenization( self ) -> Optional[int]:
    directory = Path('src/transformers' )
    identifier = 'tokenization'
    self.analyze_directory(directory , identifier=identifier )
def test_configuration( self ) -> Optional[Any]:
    directory = Path('src/transformers' )
    identifier = 'configuration'
    self.analyze_directory(directory , identifier=identifier )
def test_files_without_identifiers( self ) -> str:
    directory = Path('src/transformers' )
    n_identifiers = ['configuration', 'modeling', 'tokenization']
    self.analyze_directory(directory , n_identifier=n_identifiers )
def test_documentation( self ) -> Union[str, Any]:
    directory = Path('docs/source' )
    ignore_files = ['favicon.ico']
    self.analyze_directory(directory , ignore_files=ignore_files , only_modules=False )
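# Minimal illustration (added): doctest.DocTestSuite turns a module's doctests
# into unittest cases, which is what the only_modules branch above relies on.
import types as _types
_demo_mod = _types.ModuleType("_doc_demo")
_demo_mod.__doc__ = """
>>> 1 + 1
2
"""
_demo_result = unittest.TextTestRunner(verbosity=0).run(doctest.DocTestSuite(_demo_mod))
assert _demo_result.wasSuccessful()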
| 20 | 0 |
"""simple docstring"""
def lowerCAmelCase_( number_of_steps ) -> int:
    """simple docstring"""
    assert (
        isinstance(number_of_steps , int ) and number_of_steps > 0
    ), F'number_of_steps needs to be a positive integer, your input {number_of_steps}'
    if number_of_steps == 1:
        return 1
    current , previous = 1, 1
    for _ in range(number_of_steps - 1 ):
        current , previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
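# Added check: the loop implements the Fibonacci recurrence for the number of
# distinct ways to climb n steps taking 1 or 2 at a time; for n = 5 there are 8.
assert lowerCAmelCase_(5) == 8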
| 717 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : str= logging.get_logger(__name__)
A__ : List[Any]= {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig( PretrainedConfig ):
    model_type = """segformer"""
def __init__( self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 160, 256] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , classifier_dropout_prob=0.1 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1E-6 , decoder_hidden_size=256 , semantic_loss_ignore_index=255 , **kwargs , ) -> Tuple:
    super().__init__(**kwargs )
    if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
        warnings.warn(
            'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
            ' removed, as the behaviour will default to that of reshape_last_stage = True.' , FutureWarning , )
    self.num_channels = num_channels
    self.num_encoder_blocks = num_encoder_blocks
    self.depths = depths
    self.sr_ratios = sr_ratios
    self.hidden_sizes = hidden_sizes
    self.patch_sizes = patch_sizes
    self.strides = strides
    self.mlp_ratios = mlp_ratios
    self.num_attention_heads = num_attention_heads
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.classifier_dropout_prob = classifier_dropout_prob
    self.initializer_range = initializer_range
    self.drop_path_rate = drop_path_rate
    self.layer_norm_eps = layer_norm_eps
    self.decoder_hidden_size = decoder_hidden_size
    self.reshape_last_stage = kwargs.get('reshape_last_stage' , True )
    self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("""1.11""" )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> float:
return 1E-4
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return 12
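# Worked note (added): the default strides [4, 2, 2, 2] downsample a 512x512 input
# to stage feature maps of 128, 64, 32 and 16 pixels per side.
_side = 512
for _stride in [4, 2, 2, 2]:
    _side //= _stride
assert _side == 16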
| 20 | 0 |
"""simple docstring"""
class Node:
def __init__( self , name , val ) -> Union[str, Any]:
    self.name = name
    self.val = val
def __str__( self ) -> Any:
return F'{self.__class__.__name__}({self.name}, {self.val})'
def __lt__( self , other ) -> List[Any]:
    return self.val < other.val
class MinHeap:
def __init__( self , array ) -> Optional[Any]:
    self.idx_of_element = {}
    self.heap_dict = {}
    self.heap = self.build_heap(array )
def __getitem__( self , key ) -> Optional[Any]:
    return self.get_value(key )
def get_parent_idx( self , idx ) -> Union[str, Any]:
    return (idx - 1) // 2
def get_left_child_idx( self , idx ) -> str:
    return idx * 2 + 1
def get_right_child_idx( self , idx ) -> Union[str, Any]:
    return idx * 2 + 2
def get_value( self , key ) -> Optional[Any]:
    return self.heap_dict[key]
def build_heap( self , array ) -> str:
    last_idx = len(array ) - 1
    start_from = self.get_parent_idx(last_idx )
    for idx, i in enumerate(array ):
        self.idx_of_element[i] = idx
        self.heap_dict[i.name] = i.val
    for i in range(start_from , -1 , -1 ):
        self.sift_down(i , array )
    return array
def sift_down( self , idx , array ) -> Any:
    while True:
        l = self.get_left_child_idx(idx )  # noqa: E741
        r = self.get_right_child_idx(idx )
        smallest = idx
        if l < len(array ) and array[l] < array[idx]:
            smallest = l
        if r < len(array ) and array[r] < array[smallest]:
            smallest = r
        if smallest != idx:
            array[idx] , array[smallest] = array[smallest], array[idx]
            (
                self.idx_of_element[array[idx]],
                self.idx_of_element[array[smallest]],
            ) = (
                self.idx_of_element[array[smallest]],
                self.idx_of_element[array[idx]],
            )
            idx = smallest
        else:
            break
def sift_up( self , idx ) -> str:
    p = self.get_parent_idx(idx )
    while p >= 0 and self.heap[p] > self.heap[idx]:
        self.heap[p] , self.heap[idx] = self.heap[idx], self.heap[p]
        self.idx_of_element[self.heap[p]] , self.idx_of_element[self.heap[idx]] = (
            self.idx_of_element[self.heap[idx]],
            self.idx_of_element[self.heap[p]],
        )
        idx = p
        p = self.get_parent_idx(idx )
def peek( self ) -> Dict:
    return self.heap[0]
def remove( self ) -> int:
    self.heap[0] , self.heap[-1] = self.heap[-1], self.heap[0]
    self.idx_of_element[self.heap[0]] , self.idx_of_element[self.heap[-1]] = (
        self.idx_of_element[self.heap[-1]],
        self.idx_of_element[self.heap[0]],
    )
    x = self.heap.pop()
    del self.idx_of_element[x]
    self.sift_down(0 , self.heap )
    return x
def insert( self , node ) -> Optional[int]:
    self.heap.append(node )
    self.idx_of_element[node] = len(self.heap ) - 1
    self.heap_dict[node.name] = node.val
    self.sift_up(len(self.heap ) - 1 )
def is_empty( self ) -> Union[str, Any]:
    return len(self.heap ) == 0
def decrease_key( self , node , new_value ) -> Tuple:
    assert (
        self.heap[self.idx_of_element[node]].val > new_value
    ), "new_value must be less than current value"
    node.val = new_value
    self.heap_dict[node.name] = new_value
    self.sift_up(self.idx_of_element[node] )
r = Node("""R""", -1)
b = Node("""B""", 6)
a = Node("""A""", 3)
x = Node("""X""", 1)
e = Node("""E""", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("""Min Heap - before decrease key""")
for i in my_min_heap.heap:
print(i)
print("""Min Heap - After decrease key of node [B -> -17]""")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
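# Added check: after decrease_key, B (now -17) is the minimum; peek() exposes it
# and remove() pops it while keeping the index bookkeeping consistent.
assert my_min_heap.peek() is b
assert my_min_heap.remove() is b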
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718 |
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def lowerCAmelCase_( args ) -> Dict:
    """simple docstring"""
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class DownloadCommand( BaseTransformersCLICommand ):
    @staticmethod
    def register_subcommand( parser ) -> Union[str, Any]:
        download_parser = parser.add_parser('download' )
        download_parser.add_argument(
            '--cache-dir' , type=str , default=None , help='Path to location to store the models' )
        download_parser.add_argument(
            '--force' , action='store_true' , help='Force the model to be download even if already in cache-dir' )
        download_parser.add_argument(
            '--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , )
        download_parser.add_argument('model' , type=str , help='Name of the model to download' )
        download_parser.set_defaults(func=lowerCAmelCase_ )
def __init__( self , model , cache , force , trust_remote_code ) -> str:
    self._model = model
    self._cache = cache
    self._force = force
    self._trust_remote_code = trust_remote_code
def run( self ) -> List[Any]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
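# Usage sketch (added): wiring the subcommand into a fresh parser mirrors what the
# transformers-cli entry point does; parsing stops short of actually downloading.
if __name__ == "__main__":
    parser = ArgumentParser("transformers-cli demo")
    DownloadCommand.register_subcommand(parser.add_subparsers())
    args = parser.parse_args(["download", "bert-base-uncased"])
    print(args.model, args.force)  # bert-base-uncased False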
| 20 | 0 |
"""simple docstring"""
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
return EnvironmentCommand()
class EnvironmentCommand( BaseDiffusersCLICommand ):
    @staticmethod
    def register_subcommand( parser ) -> List[str]:
        download_parser = parser.add_parser('env' )
        download_parser.set_defaults(func=lowerCAmelCase_ )
def run( self ) -> List[Any]:
    hub_version = huggingface_hub.__version__
    pt_version = 'not installed'
    pt_cuda_available = 'NA'
    if is_torch_available():
        import torch
        pt_version = torch.__version__
        pt_cuda_available = torch.cuda.is_available()
    transformers_version = 'not installed'
    if is_transformers_available():
        import transformers
        transformers_version = transformers.__version__
    accelerate_version = 'not installed'
    if is_accelerate_available():
        import accelerate
        accelerate_version = accelerate.__version__
    xformers_version = 'not installed'
    if is_xformers_available():
        import xformers
        xformers_version = xformers.__version__
    info = {
        '`diffusers` version': version,
        'Platform': platform.platform(),
        'Python version': platform.python_version(),
        'PyTorch version (GPU?)': F'{pt_version} ({pt_cuda_available})',
        'Huggingface_hub version': hub_version,
        'Transformers version': transformers_version,
        'Accelerate version': accelerate_version,
        'xFormers version': xformers_version,
        'Using GPU in script?': '<fill in>',
        'Using distributed or parallel set-up in script?': '<fill in>',
    }
    print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
    print(self.format_dict(info ) )
    return info
@staticmethod
def format_dict( d ) -> int:
    return "\n".join([F'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
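# Added check: format_dict renders one "- key: value" line per entry, matching the
# issue-template output printed by run().
assert EnvironmentCommand.format_dict({"Platform": "demo"}) == "- Platform: demo\n"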
| 719 |
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCamelCase ( _a ):
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=False , snake_case_=True , snake_case_="None" , snake_case_=3 , snake_case_=4 , snake_case_=None , ) -> str:
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_input_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = num_labels
UpperCamelCase__ = num_choices
UpperCamelCase__ = relative_attention
UpperCamelCase__ = position_biased_input
UpperCamelCase__ = pos_att_type
UpperCamelCase__ = scope
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Any:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> str:
UpperCamelCase__ = DebertaVaModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )[0]
UpperCamelCase__ = model(snake_case_ , token_type_ids=snake_case_ )[0]
UpperCamelCase__ = model(snake_case_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
UpperCamelCase__ = DebertaVaForMaskedLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Dict:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = DebertaVaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[int]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = DebertaVaForTokenClassification(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Tuple:
UpperCamelCase__ = DebertaVaForQuestionAnswering(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = DebertaVaForMultipleChoice(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( _a , _a , unittest.TestCase ):
a : Any =(
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
a : Dict =(
{
"""feature-extraction""": DebertaVaModel,
"""fill-mask""": DebertaVaForMaskedLM,
"""question-answering""": DebertaVaForQuestionAnswering,
"""text-classification""": DebertaVaForSequenceClassification,
"""token-classification""": DebertaVaForTokenClassification,
"""zero-shot""": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
a : Tuple =True
a : Union[str, Any] =False
a : Tuple =False
a : Union[str, Any] =False
a : Dict =False
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = DebertaVaModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ = DebertaVaModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
pass
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
UpperCamelCase__ = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
UpperCamelCase__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ )[0]
# compare the actual values for a slice.
UpperCamelCase__ = torch.tensor(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case_ , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
| 20 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowerCAmelCase_( matrix: list[list[int]] ) -> int:
    """simple docstring"""
    # preprocessing the first row
    for i in range(1 , len(matrix[0] ) ):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1 , len(matrix ) ):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1 , len(matrix ) ):
        for j in range(1 , len(matrix[0] ) ):
            matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
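# Worked example (illustrative, not from the original file): with only right/down
# moves allowed, the function above mutates the matrix into cumulative path costs
# and returns the cheapest top-left -> bottom-right total:
#   lowerCAmelCase_([[1, 3, 1], [1, 5, 1], [4, 2, 1]])  # -> 7, via 1 -> 3 -> 1 -> 1 -> 1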
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ = SwinConfig()
UpperCamelCase__ = swin_name.split('_' )
UpperCamelCase__ = name_split[1]
UpperCamelCase__ = int(name_split[4] )
UpperCamelCase__ = int(name_split[3][-1] )
if model_size == "tiny":
UpperCamelCase__ = 96
UpperCamelCase__ = (2, 2, 6, 2)
UpperCamelCase__ = (3, 6, 12, 24)
elif model_size == "small":
UpperCamelCase__ = 96
UpperCamelCase__ = (2, 2, 18, 2)
UpperCamelCase__ = (3, 6, 12, 24)
elif model_size == "base":
UpperCamelCase__ = 1_28
UpperCamelCase__ = (2, 2, 18, 2)
UpperCamelCase__ = (4, 8, 16, 32)
else:
UpperCamelCase__ = 1_92
UpperCamelCase__ = (2, 2, 18, 2)
UpperCamelCase__ = (6, 12, 24, 48)
if "in22k" in swin_name:
UpperCamelCase__ = 2_18_41
else:
UpperCamelCase__ = 10_00
UpperCamelCase__ = 'huggingface/label-files'
UpperCamelCase__ = 'imagenet-1k-id2label.json'
UpperCamelCase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
UpperCamelCase__ = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
UpperCamelCase__ = idalabel
UpperCamelCase__ = {v: k for k, v in idalabel.items()}
UpperCamelCase__ = img_size
UpperCamelCase__ = num_classes
UpperCamelCase__ = embed_dim
UpperCamelCase__ = depths
UpperCamelCase__ = num_heads
UpperCamelCase__ = window_size
return config
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
if "patch_embed.proj" in name:
UpperCamelCase__ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
UpperCamelCase__ = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
UpperCamelCase__ = 'encoder.' + name
if "attn.proj" in name:
UpperCamelCase__ = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
UpperCamelCase__ = name.replace('attn' , 'attention.self' )
if "norm1" in name:
UpperCamelCase__ = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
UpperCamelCase__ = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
UpperCamelCase__ = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
UpperCamelCase__ = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
UpperCamelCase__ = 'layernorm.weight'
if name == "norm.bias":
UpperCamelCase__ = 'layernorm.bias'
if "head" in name:
UpperCamelCase__ = name.replace('head' , 'classifier' )
else:
UpperCamelCase__ = 'swin.' + name
return name
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCamelCase__ = orig_state_dict.pop(SCREAMING_SNAKE_CASE )
if "mask" in key:
continue
elif "qkv" in key:
UpperCamelCase__ = key.split('.' )
UpperCamelCase__ = int(key_split[1] )
UpperCamelCase__ = int(key_split[3] )
UpperCamelCase__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
UpperCamelCase__ = val[:dim, :]
UpperCamelCase__ = val[
dim : dim * 2, :
]
UpperCamelCase__ = val[-dim:, :]
else:
UpperCamelCase__ = val[
:dim
]
UpperCamelCase__ = val[
dim : dim * 2
]
UpperCamelCase__ = val[
-dim:
]
else:
UpperCamelCase__ = val
return orig_state_dict
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase__ = timm.create_model(SCREAMING_SNAKE_CASE , pretrained=SCREAMING_SNAKE_CASE )
timm_model.eval()
UpperCamelCase__ = get_swin_config(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = SwinForImageClassification(SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase__ = convert_state_dict(timm_model.state_dict() , SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCamelCase__ = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
UpperCamelCase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
UpperCamelCase__ = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='pt' )
UpperCamelCase__ = timm_model(inputs['pixel_values'] )
UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 )
print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
A__ : Optional[Any]= argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
A__ : Tuple= parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 20 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
A__ : Optional[Any]= TypeVar("""T""")
A__ : Optional[Any]= TypeVar("""U""")
class __lowerCamelCase ( Generic[T, U] ):
    def __init__( self , key: T | None , val: U | None ) -> None:
        self.key = key
        self.val = val
        self.next = None
        self.prev = None
def __repr__( self ) -> str:
return (
F'Node: key: {self.key}, val: {self.val}, '
F'has next: {bool(self.next )}, has prev: {bool(self.prev )}'
)
class __lowerCamelCase ( Generic[T, U] ):
def __init__( self ) -> None:
        self.head = DoubleLinkedListNode(None , None )
        self.rear = DoubleLinkedListNode(None , None )
        # wire the sentinels together: an empty list is just head <-> rear
        self.head.next , self.rear.prev = self.rear, self.head
def __repr__( self ) -> str:
        rep = ['DoubleLinkedList']
        node = self.head
        while node.next is not None:
            rep.append(str(node ) )
            node = node.next
        rep.append(str(self.rear ) )
        return ",\n ".join(rep )
    def add(self , node: DoubleLinkedListNode[T, U] ) -> None:
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        node.next = self.rear
        self.rear.prev = node
    def remove(self , node: DoubleLinkedListNode[T, U] ) -> DoubleLinkedListNode[T, U] | None:
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class __lowerCamelCase ( Generic[T, U] ):
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}
    def __init__( self , capacity: int ) -> None:
        self.list = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache = {}
def __repr__( self ) -> str:
return (
F'CacheInfo(hits={self.hits}, misses={self.miss}, '
F'capacity={self.capacity}, current size={self.num_keys})'
)
    def __contains__( self , key ) -> bool:
return key in self.cache
    def get(self , key: T ) -> U | None:
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node = self.cache[key]
            node = self.list.remove(self.cache[key] )
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node )
            return node.val
        self.miss += 1
        return None
    def put(self , key: T , value: U ) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node ) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key , value )
            self.list.add(self.cache[key] )
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key] )
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node )
@classmethod
    def decorator(cls , size: int = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        def cache_decorator_inner(func: Callable[[T], U] ) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T ) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size )
                result = cls.decorator_function_to_instance_map[func].get(args[0] )
                if result is None:
                    result = func(*args )
                    cls.decorator_function_to_instance_map[func].put(args[0] , result )
                return result
            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]
            setattr(cache_decorator_wrapper , 'cache_info' , cache_info )  # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
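# Usage sketch (illustrative, mirroring the class above): the `decorator`
# classmethod memoizes a one-argument function in a per-function LRU cache.
#
#   @LRUCache.decorator(100)
#   def fib(num: int) -> int:
#       return num if num < 2 else fib(num - 1) + fib(num - 2)
#
#   fib(100); fib.cache_info()  # -> CacheInfo(hits=..., misses=..., capacity=100, ...)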
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721 |
"""simple docstring"""
def is_pentagonal(n: int ) -> bool:
    """simple docstring"""
    # P is pentagonal iff (1 + sqrt(1 + 24 * P)) / 6 is a whole number (inverse of P = n(3n - 1) / 2)
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
def solution(limit: int = 50_00 ) -> int:
    """simple docstring"""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1 , limit )]
    for i, pentagonal_i in enumerate(pentagonal_nums ):
        for j in range(i , len(pentagonal_nums ) ):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a ) and is_pentagonal(b ):
                return b
return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 | 0 |
"""simple docstring"""
def logical_left_shift(number: int , shift_amount: int ) -> str:
    """simple docstring"""
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be positive integers' )
    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift(number: int , shift_amount: int ) -> str:
    """simple docstring"""
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be positive integers' )
    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift(number: int , shift_amount: int ) -> str:
    """simple docstring"""
    if number >= 0:  # Get binary representation of positive number
        binary_number = '0' + str(bin(number ) ).strip('-' )[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] )  # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length ) )[3:]
        binary_number = (
            '1' + '0' * (binary_number_length - len(binary_number )) + binary_number
        )
    if shift_amount >= len(binary_number ):
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
)
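# Worked examples (illustrative, derived from the implementations above):
#   logical_left_shift(0b1101, 2)       # -> '0b110100'
#   logical_right_shift(0b1101, 2)      # -> '0b11'
#   arithmetic_right_shift(-13, 2)      # -> '0b11100'  (-13 >> 2 == -4; the sign bit is replicated)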
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700 |
"""simple docstring"""
def solution(limit: int = 50_00_00_00 ) -> int:
    """simple docstring"""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2) )
    # odd-only sieve of Eratosthenes up to sqrt(limit - 24)
    primes = set(range(3 , prime_square_limit + 1 , 2 ) )
    primes.add(2 )
    for p in range(3 , prime_square_limit + 1 , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , prime_square_limit + 1 , p ) ) )
    for primea in primes:
        square = primea * primea
        for primeb in primes:
            cube = primeb * primeb * primeb
            if square + cube >= limit - 16:
                break
            for primec in primes:
                tetr = primec * primec * primec * primec
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total )
    return len(ret )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 | 0 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
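# Context (Project Euler 180): the four branches in solution() below cover the only
# achievable exponents n in {1, 2, -1, -2} for x**n + y**n == z**n with rational
# x, y, z (Fermat's Last Theorem rules out |n| >= 3); the answer is the numerator
# plus the denominator of the sum of all distinct x + y + z values.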
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
UpperCamelCase__ = int(number**0.5 )
return number == sq * sq
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> tuple[int, int]:
"""simple docstring"""
UpperCamelCase__ = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
UpperCamelCase__ = x_den * y_den * z_den
UpperCamelCase__ = gcd(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
top //= hcf
bottom //= hcf
return top, bottom
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 35 ) -> int:
"""simple docstring"""
UpperCamelCase__ = set()
UpperCamelCase__ = 42
UpperCamelCase__ = Fraction(0 )
UpperCamelCase__ = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
UpperCamelCase__ = x_num * y_den + x_den * y_num
UpperCamelCase__ = x_den * y_den
UpperCamelCase__ = gcd(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ = add_three(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
unique_s.add(SCREAMING_SNAKE_CASE )
# n=2
UpperCamelCase__ = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
UpperCamelCase__ = x_den * x_den * y_den * y_den
if is_sq(SCREAMING_SNAKE_CASE ) and is_sq(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = int(sqrt(SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ = int(sqrt(SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ = gcd(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ = add_three(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
unique_s.add(SCREAMING_SNAKE_CASE )
# n=-1
UpperCamelCase__ = x_num * y_num
UpperCamelCase__ = x_den * y_num + x_num * y_den
UpperCamelCase__ = gcd(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ = add_three(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
unique_s.add(SCREAMING_SNAKE_CASE )
                    # n=-2
UpperCamelCase__ = x_num * x_num * y_num * y_num
UpperCamelCase__ = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(SCREAMING_SNAKE_CASE ) and is_sq(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = int(sqrt(SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ = int(sqrt(SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ = gcd(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase__ = add_three(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
unique_s.add(SCREAMING_SNAKE_CASE )
for num, den in unique_s:
total += Fraction(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""")
| 701 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
A__ : List[Any]= ["""bert-base-uncased""", """bert-base-cased"""]
A__ : Optional[int]= """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
class __lowerCamelCase ( tf.keras.Model ):
def __init__( self , snake_case_ ) -> Optional[int]:
super().__init__()
UpperCamelCase__ = tokenizer
UpperCamelCase__ = AutoConfig.from_pretrained(snake_case_ )
UpperCamelCase__ = TFAutoModel.from_config(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> int:
UpperCamelCase__ = self.tokenizer(snake_case_ )
UpperCamelCase__ = self.bert(**snake_case_ )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
super().setUp()
UpperCamelCase__ = [
BertTokenizer.from_pretrained(snake_case_ ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
UpperCamelCase__ = [TFBertTokenizer.from_pretrained(snake_case_ ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(snake_case_ , use_fast_bert_tokenizer=snake_case_ )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
UpperCamelCase__ = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
UpperCamelCase__ = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCamelCase__ = tokenizer(snake_case_ , return_tensors='tf' , padding='longest' )
UpperCamelCase__ = tf_tokenizer(snake_case_ )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = tf_tokenizer(self.paired_sentences )
UpperCamelCase__ = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = tf.function(snake_case_ )
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCamelCase__ = tf.constant(snake_case_ )
UpperCamelCase__ = compiled_tokenizer(snake_case_ )
UpperCamelCase__ = tf_tokenizer(snake_case_ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ = ModelToSave(tokenizer=snake_case_ )
UpperCamelCase__ = tf.convert_to_tensor(self.test_sentences )
UpperCamelCase__ = model(snake_case_ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
UpperCamelCase__ = Path(snake_case_ ) / 'saved.model'
model.save(snake_case_ )
UpperCamelCase__ = tf.keras.models.load_model(snake_case_ )
UpperCamelCase__ = loaded_model(snake_case_ )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
| 20 | 0 |
"""simple docstring"""
A__ : dict[str, float]= {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.6_0_9_3_4_4,
"knot": 1.8_5_2,
}
A__ : dict[str, float]= {
"km/h": 1.0,
"m/s": 0.2_7_7_7_7_7_7_7_8,
"mph": 0.6_2_1_3_7_1_1_9_2,
"knot": 0.5_3_9_9_5_6_8_0_3,
}
def convert_speed(speed: float , unit_from: str , unit_to: str ) -> float:
    """simple docstring"""
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            F'Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n'
            F'Valid values are: {", ".join(speed_chart_inverse )}'
        )
        raise ValueError(msg )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
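# Worked examples (derived from the conversion tables above):
#   convert_speed(100, 'km/h', 'm/s')   # -> 27.778
#   convert_speed(100, 'mph', 'km/h')   # -> 160.934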
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702 |
"""simple docstring"""
def exchange_sort(numbers: list[int] ) -> list[int]:
    """simple docstring"""
    numbers_length = len(numbers )
    for i in range(numbers_length ):
        for j in range(i + 1 , numbers_length ):
            if numbers[j] < numbers[i]:
                numbers[i] , numbers[j] = numbers[j], numbers[i]
return numbers
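# Example (illustrative): exchange_sort([5, 4, 3, 2, 1]) -> [1, 2, 3, 4, 5].
# The sort runs in O(n^2) comparisons and sorts the list in place.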
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
| 20 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : List[Any]= logging.get_logger(__name__)
A__ : Any= {
"""vinvino02/glpn-kitti""": """https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json""",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class __lowerCamelCase ( _a ):
a : int ="""glpn"""
def __init__( self , snake_case_=3 , snake_case_=4 , snake_case_=[2, 2, 2, 2] , snake_case_=[8, 4, 2, 1] , snake_case_=[32, 64, 160, 256] , snake_case_=[7, 3, 3, 3] , snake_case_=[4, 2, 2, 2] , snake_case_=[1, 2, 5, 8] , snake_case_=[4, 4, 4, 4] , snake_case_="gelu" , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.02 , snake_case_=0.1 , snake_case_=1E-6 , snake_case_=64 , snake_case_=10 , snake_case_=-1 , **snake_case_ , ) -> List[str]:
super().__init__(**snake_case_ )
UpperCamelCase__ = num_channels
UpperCamelCase__ = num_encoder_blocks
UpperCamelCase__ = depths
UpperCamelCase__ = sr_ratios
UpperCamelCase__ = hidden_sizes
UpperCamelCase__ = patch_sizes
UpperCamelCase__ = strides
UpperCamelCase__ = mlp_ratios
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = drop_path_rate
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = decoder_hidden_size
UpperCamelCase__ = max_depth
UpperCamelCase__ = head_in_index
| 703 |
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
A__ : str= {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
A__ : str= {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase__ = (images / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase__ = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
UpperCamelCase__ = numpy_to_pil(SCREAMING_SNAKE_CASE )
return images
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
if images.ndim == 3:
UpperCamelCase__ = images[None, ...]
UpperCamelCase__ = (images * 2_55).round().astype('uint8' )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
UpperCamelCase__ = [Image.fromarray(image.squeeze() , mode='L' ) for image in images]
else:
UpperCamelCase__ = [Image.fromarray(SCREAMING_SNAKE_CASE ) for image in images]
return pil_images
| 20 | 0 |
"""simple docstring"""
def max_product_subarray(numbers: list[int] ) -> int:
    """simple docstring"""
    if not numbers:
        return 0
    if not isinstance(numbers , (list, tuple) ) or not all(
        isinstance(number , int ) for number in numbers ):
        raise ValueError('numbers must be an iterable of integers' )
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1 , len(numbers ) ):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            min_till_now , max_till_now = max_till_now, min_till_now
        max_till_now = max(number , max_till_now * number )
        min_till_now = min(number , min_till_now * number )
        # update the maximum product found till now
        max_prod = max(max_prod , max_till_now )
return max_prod
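# Example (illustrative): max_product_subarray([2, 3, -2, 4]) -> 6, from subarray [2, 3].
# Swapping min/max on a negative number is what lets a later negative value turn a
# very small (negative) running product into the new maximum.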
| 704 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
A__ : Dict= logging.get_logger(__name__)
A__ : str= {"""vocab_file""": """spiece.model"""}
A__ : Union[str, Any]= {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
}
}
# TODO(PVP) - this should be removed in Transformers v5
A__ : Union[str, Any]= {
"""t5-small""": 5_12,
"""t5-base""": 5_12,
"""t5-large""": 5_12,
"""t5-3b""": 5_12,
"""t5-11b""": 5_12,
}
A__ : Optional[Any]= """▁"""
class __lowerCamelCase ( _a ):
a : Dict =VOCAB_FILES_NAMES
a : str =PRETRAINED_VOCAB_FILES_MAP
a : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[str] =["""input_ids""", """attention_mask"""]
def __init__( self , snake_case_ , snake_case_="</s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_=100 , snake_case_=None , snake_case_ = None , snake_case_=True , **snake_case_ , ) -> None:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCamelCase__ = [F'<extra_id_{i}>' for i in range(snake_case_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
UpperCamelCase__ = len(set(filter(lambda snake_case_ : bool('extra_id' in str(snake_case_ ) ) , snake_case_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
if legacy:
logger.warning_once(
F'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' )
UpperCamelCase__ = legacy
UpperCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , extra_ids=snake_case_ , additional_special_tokens=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , legacy=snake_case_ , **snake_case_ , )
UpperCamelCase__ = vocab_file
UpperCamelCase__ = extra_ids
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case_ )
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
UpperCamelCase__ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
F' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
F' {pretrained_model_name_or_path} automatically truncating your input to'
F' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
F' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , snake_case_ , )
return max_model_length
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
return self.sp_model.get_piece_size() + self._extra_ids
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None , snake_case_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(snake_case_ )) + [1]
return ([0] * len(snake_case_ )) + [1] + ([0] * len(snake_case_ )) + [1]
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
return list(
set(filter(lambda snake_case_ : bool(re.search(r'<extra_id_\d+>' , snake_case_ ) ) is not None , self.additional_special_tokens ) ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return [self._convert_token_to_id(snake_case_ ) for token in self.get_sentinel_tokens()]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[int]:
if len(snake_case_ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
UpperCamelCase__ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
UpperCamelCase__ = self._add_eos_if_not_present(snake_case_ )
if token_ids_a is None:
return token_ids_a
else:
UpperCamelCase__ = self._add_eos_if_not_present(snake_case_ )
return token_ids_a + token_ids_a
def __getstate__( self ) -> str:
UpperCamelCase__ = self.__dict__.copy()
UpperCamelCase__ = None
return state
def __setstate__( self , snake_case_ ) -> Any:
UpperCamelCase__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCamelCase__ = {}
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , **snake_case_ ) -> List[str]:
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
UpperCamelCase__ = SPIECE_UNDERLINE + text.replace(snake_case_ , ' ' )
return super().tokenize(snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , **snake_case_ ) -> List[Any]:
if not self.legacy:
UpperCamelCase__ = text.startswith(snake_case_ )
if is_first:
UpperCamelCase__ = text[1:]
UpperCamelCase__ = self.sp_model.encode(snake_case_ , out_type=snake_case_ )
if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(snake_case_ ):
UpperCamelCase__ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
if token.startswith('<extra_id_' ):
UpperCamelCase__ = re.match(r'<extra_id_(\d+)>' , snake_case_ )
UpperCamelCase__ = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Optional[int]:
if index < self.sp_model.get_piece_size():
UpperCamelCase__ = self.sp_model.IdToPiece(snake_case_ )
else:
UpperCamelCase__ = F'<extra_id_{self.vocab_size - 1 - index}>'
return token
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
UpperCamelCase__ = []
UpperCamelCase__ = ''
UpperCamelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case_ ) + token
UpperCamelCase__ = True
UpperCamelCase__ = []
else:
current_sub_tokens.append(snake_case_ )
UpperCamelCase__ = False
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
if not os.path.isdir(snake_case_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCamelCase__ = os.path.join(
snake_case_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case_ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case_ , 'wb' ) as fi:
UpperCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (out_vocab_file,)
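# Note (T5 background): the `extra_ids` above become sentinel tokens <extra_id_0> ...
# <extra_id_99> that sit at the *top* of the vocabulary, which is why the id/token
# converters use `self.vocab_size - num - 1` instead of a plain SentencePiece lookup.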
| 20 | 0 |
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
A__ : Optional[int]= logging.get_logger(__name__)
@add_end_docstrings(_a )
class __lowerCamelCase ( _a ):
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING )
    def _sanitize_parameters(self , top_k=None , padding=None , truncation=None , **kwargs ) -> dict:
        preprocess_params , postprocess_params = {}, {}
        if padding is not None:
            preprocess_params['padding'] = padding
        if truncation is not None:
            preprocess_params['truncation'] = truncation
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self , image , question = None , **kwargs ):
        if isinstance(image , (Image.Image, str) ) and isinstance(question , str ):
            inputs = {'image': image, 'question': question}
        else:
            # assume the caller already passed a dict (or list of dicts) of {"image": ..., "question": ...}
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results
    def preprocess(self , inputs , padding=False , truncation=False ):
        image = load_image(inputs['image'] )
        model_inputs = self.tokenizer(
            inputs['question'] , return_tensors=self.framework , padding=padding , truncation=truncation )
        image_features = self.image_processor(images=image , return_tensors=self.framework )
        model_inputs.update(image_features )
        return model_inputs
    def _forward(self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess(self , model_outputs , top_k=5 ) -> list:
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores , ids = probs.topk(top_k )
        else:
            raise ValueError(F'Unsupported framework: {self.framework}' )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 705 |
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
A__ : Any= TypeVar("""T""")
class __lowerCamelCase ( Generic[T] ):
    def __init__( self , data: T ) -> None:
        self.data = data
        self.parent = self
        self.rank = 0
class __lowerCamelCase ( Generic[T] ):
def __init__( self ) -> None:
# map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}
    def make_set(self , data: T ) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data )
    def find_set(self , data: T ) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data )
        return elem_ref.parent
    def link(self , nodea: DisjointSetTreeNode[T] , nodeb: DisjointSetTreeNode[T] ) -> None:
        # helper function for union operation (union by rank)
        if nodea.rank > nodeb.rank:
            nodeb.parent = nodea
        else:
            nodea.parent = nodeb
            if nodea.rank == nodeb.rank:
                nodeb.rank += 1
    def union(self , dataa: T , datab: T ) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(dataa ) , self.find_set(datab ) )
class __lowerCamelCase ( Generic[T] ):
def __init__( self ) -> None:
# connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}
    def add_node(self , node: T ) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}
    def add_edge(self , nodea: T , nodeb: T , weight: int ) -> None:
        # add an edge with the given weight
        self.add_node(nodea )
        self.add_node(nodeb )
        self.connections[nodea][nodeb] = weight
        self.connections[nodeb][nodea] = weight
    def kruskal(self ) -> GraphUndirectedWeighted[T]:
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start) )
                    edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda x : x[2] )
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node )
        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections ) - 1:
            u , v , w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u )
            parent_v = disjoint_set.find_set(v )
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u , v , w )
                disjoint_set.union(u , v )
return graph
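# Usage sketch (illustrative):
#   g = GraphUndirectedWeighted[int]()
#   g.add_edge(1, 2, 1); g.add_edge(2, 3, 2); g.add_edge(1, 3, 10)
#   mst = g.kruskal()  # keeps edges (1, 2) and (2, 3); the weight-10 edge would close a cycle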
| 20 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
A__ : List[Any]= logging.get_logger(__name__)
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
UpperCamelCase__ = WavaVecaForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE , config=SCREAMING_SNAKE_CASE )
UpperCamelCase__ = downstream_dict['projector.weight']
UpperCamelCase__ = downstream_dict['projector.bias']
UpperCamelCase__ = downstream_dict['model.post_net.linear.weight']
UpperCamelCase__ = downstream_dict['model.post_net.linear.bias']
return model
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase__ = WavaVecaForAudioFrameClassification.from_pretrained(SCREAMING_SNAKE_CASE , config=SCREAMING_SNAKE_CASE )
UpperCamelCase__ = downstream_dict['model.linear.weight']
UpperCamelCase__ = downstream_dict['model.linear.bias']
return model
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ = WavaVecaForXVector.from_pretrained(SCREAMING_SNAKE_CASE , config=SCREAMING_SNAKE_CASE )
UpperCamelCase__ = downstream_dict['connector.weight']
UpperCamelCase__ = downstream_dict['connector.bias']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
UpperCamelCase__ = downstream_dict[
F'model.framelevel_feature_extractor.module.{i}.kernel.weight'
]
UpperCamelCase__ = downstream_dict[F'model.framelevel_feature_extractor.module.{i}.kernel.bias']
UpperCamelCase__ = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
UpperCamelCase__ = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
UpperCamelCase__ = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
UpperCamelCase__ = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
UpperCamelCase__ = downstream_dict['objective.W']
return model
@torch.no_grad()
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
UpperCamelCase__ = torch.load(SCREAMING_SNAKE_CASE , map_location='cpu' )
UpperCamelCase__ = checkpoint['Downstream']
UpperCamelCase__ = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = WavaVecaFeatureExtractor.from_pretrained(
SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , do_normalize=SCREAMING_SNAKE_CASE )
UpperCamelCase__ = hf_config.architectures[0]
if arch.endswith('ForSequenceClassification' ):
UpperCamelCase__ = convert_classification(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif arch.endswith('ForAudioFrameClassification' ):
UpperCamelCase__ = convert_diarization(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif arch.endswith('ForXVector' ):
UpperCamelCase__ = convert_xvector(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
raise NotImplementedError(F'S3PRL weights conversion is not supported for {arch}' )
if hf_config.use_weighted_layer_sum:
UpperCamelCase__ = checkpoint['Featurizer']['weights']
hf_feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
A__ : str= argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
A__ : List[Any]= parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 706 |
"""simple docstring"""
A__ : Tuple= """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str ) -> int:
    """simple docstring"""
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num_a = operand_stack.peek()
            operand_stack.pop()
            num_b = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num_b , num_a )
            operand_stack.push(total )
# RULE 5
return operand_stack.peek()
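# Note: Rule 3 of the two-stack algorithm (skip '(' and whitespace) is implicit here —
# characters that are neither digits, operators, nor ')' simply fall through the if/elif chain.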
if __name__ == "__main__":
A__ : int= """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 20 | 0 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ , UpperCamelCase__ = FlaxStableDiffusionPipeline.from_pretrained(
'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloataa , )
UpperCamelCase__ = 'A painting of a squirrel eating a burger'
UpperCamelCase__ = jax.device_count()
UpperCamelCase__ = num_samples * [prompt]
UpperCamelCase__ = sd_pipe.prepare_inputs(snake_case_ )
UpperCamelCase__ = replicate(snake_case_ )
UpperCamelCase__ = shard(snake_case_ )
UpperCamelCase__ = jax.random.PRNGKey(0 )
UpperCamelCase__ = jax.random.split(snake_case_ , jax.device_count() )
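        # jax.random.split yields one PRNG key per device; the upstream test
        # passes jit=True, which runs the pipeline under pmap, so the sharded
        # prompt ids and the keys both carry a leading device axis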
UpperCamelCase__ = sd_pipe(snake_case_ , snake_case_ , snake_case_ , num_inference_steps=25 , jit=snake_case_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
UpperCamelCase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCamelCase__ = images[0, 253:256, 253:256, -1]
UpperCamelCase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCamelCase__ = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = 'stabilityai/stable-diffusion-2'
UpperCamelCase__ , UpperCamelCase__ = FlaxDPMSolverMultistepScheduler.from_pretrained(snake_case_ , subfolder='scheduler' )
UpperCamelCase__ , UpperCamelCase__ = FlaxStableDiffusionPipeline.from_pretrained(
snake_case_ , scheduler=snake_case_ , revision='bf16' , dtype=jnp.bfloataa , )
UpperCamelCase__ = scheduler_params
UpperCamelCase__ = 'A painting of a squirrel eating a burger'
UpperCamelCase__ = jax.device_count()
UpperCamelCase__ = num_samples * [prompt]
UpperCamelCase__ = sd_pipe.prepare_inputs(snake_case_ )
UpperCamelCase__ = replicate(snake_case_ )
UpperCamelCase__ = shard(snake_case_ )
UpperCamelCase__ = jax.random.PRNGKey(0 )
UpperCamelCase__ = jax.random.split(snake_case_ , jax.device_count() )
UpperCamelCase__ = sd_pipe(snake_case_ , snake_case_ , snake_case_ , num_inference_steps=25 , jit=snake_case_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
UpperCamelCase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCamelCase__ = images[0, 253:256, 253:256, -1]
UpperCamelCase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCamelCase__ = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 707 |
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
A__ : Any= """src/diffusers"""
# Matches is_xxx_available()
A__ : Tuple= re.compile(r"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
A__ : Any= re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
A__ : Optional[Any]= """
{0} = None
"""
A__ : List[Any]= """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
A__ : Dict= """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ = _re_backend.findall(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) == 0:
return None
return "_and_".join(SCREAMING_SNAKE_CASE )
def lowerCAmelCase_( ) -> str:
"""simple docstring"""
with open(os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase__ = f.readlines()
# Get to the point we do the actual imports for type checking
UpperCamelCase__ = 0
UpperCamelCase__ = {}
# Go through the end of the file
while line_index < len(SCREAMING_SNAKE_CASE ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
UpperCamelCase__ = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('else:' ):
line_index += 1
line_index += 1
UpperCamelCase__ = []
# Until we unindent, add backend objects to the list
while line_index < len(SCREAMING_SNAKE_CASE ) and len(lines[line_index] ) > 1:
UpperCamelCase__ = lines[line_index]
UpperCamelCase__ = _re_single_line_import.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(SCREAMING_SNAKE_CASE ) > 0:
UpperCamelCase__ = objects
else:
line_index += 1
return backend_specific_objects
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
if name.isupper():
return DUMMY_CONSTANT.format(SCREAMING_SNAKE_CASE )
elif name.islower():
return DUMMY_FUNCTION.format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
return DUMMY_CLASS.format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
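# Upper-case names become "<NAME> = None", lower-case names become stub
# functions, and anything else becomes a DummyObject class (templates above)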
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=None ) -> int:
"""simple docstring"""
if backend_specific_objects is None:
UpperCamelCase__ = read_init()
    # Build the content of each dummy_<backend>_objects.py file
UpperCamelCase__ = {}
for backend, objects in backend_specific_objects.items():
UpperCamelCase__ = '[' + ', '.join(F'"{b}"' for b in backend.split('_and_' ) ) + ']'
UpperCamelCase__ = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for o in objects] )
UpperCamelCase__ = dummy_file
return dummy_files
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=False ) -> int:
"""simple docstring"""
UpperCamelCase__ = create_dummy_files()
    # Special-cased backend short names used in the dummy file names (e.g. torch -> dummy_pt_objects.py)
UpperCamelCase__ = {'torch': 'pt'}
# Locate actual dummy modules and read their content.
UpperCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE , 'utils' )
UpperCamelCase__ = {
backend: os.path.join(SCREAMING_SNAKE_CASE , F'dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py' )
for backend in dummy_files.keys()
}
UpperCamelCase__ = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(SCREAMING_SNAKE_CASE ):
with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
UpperCamelCase__ = f.read()
else:
UpperCamelCase__ = ''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'Updating diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py as the main '
'__init__ has new objects.' )
with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'The main __init__ has objects that are not present in '
F'diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}_objects.py. Run `make fix-copies` '
'to fix this.' )
if __name__ == "__main__":
A__ : Any= argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
A__ : Optional[int]= parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 20 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : List[Any]= logging.get_logger(__name__)
A__ : List[str]= {
"""google/pix2struct-textcaps-base""": (
"""https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
),
}
class __lowerCamelCase ( _a ):
a : List[str] ="""pix2struct_text_model"""
a : Any =["""past_key_values"""]
a : str ={
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , snake_case_=5_0244 , snake_case_=768 , snake_case_=64 , snake_case_=2048 , snake_case_=12 , snake_case_=12 , snake_case_=32 , snake_case_=128 , snake_case_=0.1 , snake_case_=1E-6 , snake_case_=1.0 , snake_case_="gelu_new" , snake_case_=0 , snake_case_=False , snake_case_=0 , snake_case_=1 , snake_case_=False , snake_case_=True , **snake_case_ , ) -> List[Any]:
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = d_kv
UpperCamelCase__ = d_ff
UpperCamelCase__ = num_layers
UpperCamelCase__ = num_heads
UpperCamelCase__ = relative_attention_num_buckets
UpperCamelCase__ = relative_attention_max_distance
UpperCamelCase__ = dropout_rate
UpperCamelCase__ = layer_norm_epsilon
UpperCamelCase__ = initializer_factor
UpperCamelCase__ = use_cache
UpperCamelCase__ = eos_token_id
UpperCamelCase__ = decoder_start_token_id
# for backwards compatibility
UpperCamelCase__ = dense_act_fn
super().__init__(
pad_token_id=snake_case_ , eos_token_id=snake_case_ , decoder_start_token_id=snake_case_ , tie_word_embeddings=snake_case_ , is_decoder=snake_case_ , **snake_case_ , )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , snake_case_ , **snake_case_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(snake_case_ )
UpperCamelCase__ , UpperCamelCase__ = cls.get_config_dict(snake_case_ , **snake_case_ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCamelCase__ = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(snake_case_ , **snake_case_ )
class __lowerCamelCase ( _a ):
a : Tuple ="""pix2struct_vision_model"""
def __init__( self , snake_case_=768 , snake_case_=768 , snake_case_=2048 , snake_case_=64 , snake_case_=12 , snake_case_=12 , snake_case_="gelu_new" , snake_case_=1E-6 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=1E-10 , snake_case_=1.0 , snake_case_=4096 , snake_case_=32 , snake_case_=128 , **snake_case_ , ) -> Union[str, Any]:
super().__init__(**snake_case_ )
UpperCamelCase__ = hidden_size
UpperCamelCase__ = patch_embed_hidden_size
UpperCamelCase__ = d_ff
UpperCamelCase__ = dropout_rate
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = initializer_range
UpperCamelCase__ = initializer_factor
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = dense_act_fn
UpperCamelCase__ = seq_len
UpperCamelCase__ = relative_attention_num_buckets
UpperCamelCase__ = relative_attention_max_distance
UpperCamelCase__ = d_kv
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , snake_case_ , **snake_case_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(snake_case_ )
UpperCamelCase__ , UpperCamelCase__ = cls.get_config_dict(snake_case_ , **snake_case_ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCamelCase__ = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(snake_case_ , **snake_case_ )
class __lowerCamelCase ( _a ):
a : Any ="""pix2struct"""
a : Any =True
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=1.0 , snake_case_=0.02 , snake_case_=False , snake_case_=False , snake_case_=True , **snake_case_ , ) -> List[str]:
super().__init__(tie_word_embeddings=snake_case_ , is_encoder_decoder=snake_case_ , **snake_case_ )
if text_config is None:
UpperCamelCase__ = {}
logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' )
if vision_config is None:
UpperCamelCase__ = {}
logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' )
UpperCamelCase__ = PixaStructTextConfig(**snake_case_ )
UpperCamelCase__ = PixaStructVisionConfig(**snake_case_ )
UpperCamelCase__ = self.text_config.decoder_start_token_id
UpperCamelCase__ = self.text_config.pad_token_id
UpperCamelCase__ = self.text_config.eos_token_id
UpperCamelCase__ = initializer_factor
UpperCamelCase__ = initializer_range
UpperCamelCase__ = self.initializer_range
UpperCamelCase__ = self.initializer_range
UpperCamelCase__ = is_vqa
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , snake_case_ , snake_case_ , **snake_case_ ) -> List[str]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = copy.deepcopy(self.__dict__ )
UpperCamelCase__ = self.text_config.to_dict()
UpperCamelCase__ = self.vision_config.to_dict()
UpperCamelCase__ = self.__class__.model_type
return output
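# Minimal composition sketch (keyword names assumed from the upstream
# Pix2StructConfig API):
#   config = PixaStructConfig(text_config={}, vision_config={})
#   serialized = config.to_dict()  # nests both sub-configs plus model_type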
| 708 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
A__ : Optional[Any]= """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=None ) -> Dict:
"""simple docstring"""
if subparsers is not None:
UpperCamelCase__ = subparsers.add_parser('tpu-config' , description=_description )
else:
UpperCamelCase__ = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
UpperCamelCase__ = parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
'--config_file' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=SCREAMING_SNAKE_CASE , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=SCREAMING_SNAKE_CASE , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
UpperCamelCase__ = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=SCREAMING_SNAKE_CASE , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
parser.set_defaults(func=SCREAMING_SNAKE_CASE )
return parser
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase__ = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCamelCase__ = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCamelCase__ = defaults.commands
if not args.tpu_name:
UpperCamelCase__ = defaults.tpu_name
if not args.tpu_zone:
UpperCamelCase__ = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCamelCase__ = 'git+https://github.com/huggingface/accelerate.git'
elif args.accelerate_version == "latest":
UpperCamelCase__ = 'accelerate -U'
elif isinstance(parse(args.accelerate_version ) , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = F'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError('You must specify either a command file or a command to run on the pod.' )
if args.command_file:
with open(args.command_file , 'r' ) as f:
UpperCamelCase__ = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCamelCase__ = ['cd /usr/share']
if args.install_accelerate:
new_cmd += [F'pip install {args.accelerate_version}']
new_cmd += args.command
UpperCamelCase__ = '; '.join(SCREAMING_SNAKE_CASE )
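    # the commands are collapsed into one shell string because
    # `gcloud ... ssh --command` below runs a single command on every worker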
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCamelCase__ = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F'Running {" ".join(SCREAMING_SNAKE_CASE )}' )
return
subprocess.run(SCREAMING_SNAKE_CASE )
print('Successfully setup pod.' )
def lowerCAmelCase_( ) -> int:
"""simple docstring"""
UpperCamelCase__ = tpu_command_parser()
UpperCamelCase__ = parser.parse_args()
tpu_command_launcher(SCREAMING_SNAKE_CASE )
| 20 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> list[int]:
"""simple docstring"""
UpperCamelCase__ = 2
UpperCamelCase__ = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(SCREAMING_SNAKE_CASE )
if n > 1:
factors.append(SCREAMING_SNAKE_CASE )
return factors
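# e.g. lowerCAmelCase_(360) == [2, 2, 2, 3, 3, 5]: trial division emits the
# prime factors in non-decreasing order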
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 709 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : List[str]= logging.get_logger(__name__)
class __lowerCamelCase ( _a ):
a : Optional[int] ="""timm_backbone"""
def __init__( self , snake_case_=None , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Dict:
super().__init__(**snake_case_ )
UpperCamelCase__ = backbone
UpperCamelCase__ = num_channels
UpperCamelCase__ = features_only
UpperCamelCase__ = use_pretrained_backbone
UpperCamelCase__ = True
UpperCamelCase__ = out_indices if out_indices is not None else (-1,)
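# Usage sketch (keyword names assumed from the upstream TimmBackboneConfig):
#   config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))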
| 20 | 0 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
A__ : Dict= get_tests_dir("""fixtures/test_sentencepiece.model""")
A__ : Union[str, Any]= {"""target_lang""": """fi""", """source_lang""": """en"""}
A__ : int= """>>zh<<"""
A__ : List[str]= """Helsinki-NLP/"""
if is_torch_available():
A__ : Union[str, Any]= """pt"""
elif is_tf_available():
A__ : Dict= """tf"""
else:
A__ : Optional[Any]= """jax"""
@require_sentencepiece
class __lowerCamelCase ( _a , unittest.TestCase ):
a : Tuple =MarianTokenizer
a : List[str] =False
a : int =True
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
super().setUp()
UpperCamelCase__ = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
UpperCamelCase__ = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
UpperCamelCase__ = Path(self.tmpdirname )
save_json(snake_case_ , save_dir / VOCAB_FILES_NAMES['vocab'] )
save_json(snake_case_ , save_dir / VOCAB_FILES_NAMES['tokenizer_config_file'] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(snake_case_ , save_dir / VOCAB_FILES_NAMES['source_spm'] )
copyfile(snake_case_ , save_dir / VOCAB_FILES_NAMES['target_spm'] )
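        # Marian uses separate source/target SentencePiece models; the fixture
        # reuses one sample .model file for both sides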
UpperCamelCase__ = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self , **snake_case_ ) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Dict:
return (
"This is a test",
"This is a test",
)
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
UpperCamelCase__ = '</s>'
UpperCamelCase__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '</s>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(snake_case_ ) , 9 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = MarianTokenizer.from_pretrained(F'{ORG_NAME}opus-mt-en-de' )
UpperCamelCase__ = en_de_tokenizer(['I am a small frog'] , return_tensors=snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
UpperCamelCase__ = [38, 121, 14, 697, 3_8848, 0]
self.assertListEqual(snake_case_ , batch.input_ids[0] )
UpperCamelCase__ = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(snake_case_ )
UpperCamelCase__ = [x.name for x in Path(snake_case_ ).glob('*' )]
self.assertIn('source.spm' , snake_case_ )
MarianTokenizer.from_pretrained(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = tok(
['I am a small frog' * 1000, 'I am a small frog'] , padding=snake_case_ , truncation=snake_case_ , return_tensors=snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = tok(['I am a tiny frog', 'I am a small frog'] , padding=snake_case_ , return_tensors=snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
# fmt: off
UpperCamelCase__ = {'input_ids': [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name='Helsinki-NLP/opus-mt-en-de' , revision='1a8c2263da11e68e50938f97e10cd57820bd504c' , decode_kwargs={'use_source_tokenizer': True} , )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = MarianTokenizer.from_pretrained('hf-internal-testing/test-marian-two-vocabs' )
UpperCamelCase__ = 'Tämä on testi'
UpperCamelCase__ = 'This is a test'
UpperCamelCase__ = [76, 7, 2047, 2]
UpperCamelCase__ = [69, 12, 11, 940, 2]
UpperCamelCase__ = tokenizer(snake_case_ ).input_ids
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase__ = tokenizer(text_target=snake_case_ ).input_ids
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase__ = tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
| 710 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
A__ : Any= logging.get_logger(__name__)
A__ : str= {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class __lowerCamelCase ( _a ):
a : List[str] ="""layoutlmv3"""
def __init__( self , snake_case_=5_0265 , snake_case_=768 , snake_case_=12 , snake_case_=12 , snake_case_=3072 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=2 , snake_case_=0.02 , snake_case_=1E-5 , snake_case_=1 , snake_case_=0 , snake_case_=2 , snake_case_=1024 , snake_case_=128 , snake_case_=128 , snake_case_=True , snake_case_=32 , snake_case_=128 , snake_case_=64 , snake_case_=256 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=224 , snake_case_=3 , snake_case_=16 , snake_case_=None , **snake_case_ , ) -> Union[str, Any]:
super().__init__(
vocab_size=snake_case_ , hidden_size=snake_case_ , num_hidden_layers=snake_case_ , num_attention_heads=snake_case_ , intermediate_size=snake_case_ , hidden_act=snake_case_ , hidden_dropout_prob=snake_case_ , attention_probs_dropout_prob=snake_case_ , max_position_embeddings=snake_case_ , type_vocab_size=snake_case_ , initializer_range=snake_case_ , layer_norm_eps=snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ , )
UpperCamelCase__ = max_ad_position_embeddings
UpperCamelCase__ = coordinate_size
UpperCamelCase__ = shape_size
UpperCamelCase__ = has_relative_attention_bias
UpperCamelCase__ = rel_pos_bins
UpperCamelCase__ = max_rel_pos
UpperCamelCase__ = has_spatial_attention_bias
UpperCamelCase__ = rel_ad_pos_bins
UpperCamelCase__ = max_rel_ad_pos
UpperCamelCase__ = text_embed
UpperCamelCase__ = visual_embed
UpperCamelCase__ = input_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = patch_size
UpperCamelCase__ = classifier_dropout
class __lowerCamelCase ( _a ):
a : Tuple =version.parse("""1.12""" )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> float:
return 1E-5
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return 12
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , snake_case_ = 3 , snake_case_ = 40 , snake_case_ = 40 , ) -> Mapping[str, Any]:
setattr(processor.image_processor , 'apply_ocr' , snake_case_ )
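        # OCR is disabled here (the upstream code passes False), so the dummy
        # words and boxes generated below are consumed verbatim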
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCamelCase__ = compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCamelCase__ = processor.tokenizer.num_special_tokens_to_add(snake_case_ )
UpperCamelCase__ = compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case_ )
# Generate dummy inputs according to compute batch and sequence
UpperCamelCase__ = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
UpperCamelCase__ = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
UpperCamelCase__ = self._generate_dummy_images(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
UpperCamelCase__ = dict(
processor(
snake_case_ , text=snake_case_ , boxes=snake_case_ , return_tensors=snake_case_ , ) )
return inputs
| 20 | 0 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 1_00_00_00 , SCREAMING_SNAKE_CASE = 10 ) -> int:
"""simple docstring"""
UpperCamelCase__ = defaultdict(SCREAMING_SNAKE_CASE )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
UpperCamelCase__ = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
UpperCamelCase__ = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(SCREAMING_SNAKE_CASE , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
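# Project Euler 174: for each tile total t <= t_limit, count the hollow square
# laminae using exactly t tiles, then count the totals achievable in 1..10 ways.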
if __name__ == "__main__":
print(F"""{solution() = }""")
| 711 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class __lowerCamelCase :
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ) -> Tuple:
UpperCamelCase__ = parent
UpperCamelCase__ = 13
UpperCamelCase__ = 7
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = 99
UpperCamelCase__ = 384
UpperCamelCase__ = 2
UpperCamelCase__ = 4
UpperCamelCase__ = 37
UpperCamelCase__ = 'gelu'
UpperCamelCase__ = 0.1
UpperCamelCase__ = 0.1
UpperCamelCase__ = 512
UpperCamelCase__ = 16
UpperCamelCase__ = 2
UpperCamelCase__ = 0.02
UpperCamelCase__ = 3
UpperCamelCase__ = 4
UpperCamelCase__ = 128
UpperCamelCase__ = 2
UpperCamelCase__ = 9
UpperCamelCase__ = 1
UpperCamelCase__ = None
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=snake_case_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = TFConvBertModel(config=snake_case_ )
UpperCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase__ = [input_ids, input_mask]
UpperCamelCase__ = model(snake_case_ )
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = TFConvBertForMaskedLM(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFConvBertForSequenceClassification(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Tuple:
UpperCamelCase__ = self.num_choices
UpperCamelCase__ = TFConvBertForMultipleChoice(config=snake_case_ )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
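        # each tensor is expanded and tiled to (batch, num_choices, seq_len) so
        # the model scores every answer choice against the same sequence batch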
UpperCamelCase__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFConvBertForTokenClassification(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = TFConvBertForQuestionAnswering(config=snake_case_ )
UpperCamelCase__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
UpperCamelCase__ = model(snake_case_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = self.prepare_config_and_inputs()
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __lowerCamelCase ( _a , _a , unittest.TestCase ):
a : Any =(
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
a : str =(
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a : Any =False
a : Dict =False
a : str =False
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = TFConvBertModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = True
UpperCamelCase__ = True
if hasattr(snake_case_ , 'use_cache' ):
UpperCamelCase__ = True
UpperCamelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
for model_class in self.all_model_classes:
UpperCamelCase__ = self._prepare_for_class(snake_case_ , snake_case_ )
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = len(model(snake_case_ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ , saved_model=snake_case_ )
UpperCamelCase__ = os.path.join(snake_case_ , 'saved_model' , '1' )
UpperCamelCase__ = tf.keras.models.load_model(snake_case_ )
UpperCamelCase__ = model(snake_case_ )
if self.is_encoder_decoder:
UpperCamelCase__ = outputs['encoder_hidden_states']
UpperCamelCase__ = outputs['encoder_attentions']
else:
UpperCamelCase__ = outputs['hidden_states']
UpperCamelCase__ = outputs['attentions']
self.assertEqual(len(snake_case_ ) , snake_case_ )
UpperCamelCase__ = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(snake_case_ ) , snake_case_ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = True
UpperCamelCase__ = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
UpperCamelCase__ = getattr(self.model_tester , 'key_length' , snake_case_ )
def check_decoder_attentions_output(snake_case_ ):
UpperCamelCase__ = len(snake_case_ )
self.assertEqual(out_len % 2 , 0 )
UpperCamelCase__ = outputs.decoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(snake_case_ ):
UpperCamelCase__ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
UpperCamelCase__ = True
UpperCamelCase__ = False
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
UpperCamelCase__ = len(snake_case_ )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
if self.is_encoder_decoder:
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_decoder_attentions_output(snake_case_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCamelCase__ = True
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
# Check attention is always last and order is fine
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = model_class(snake_case_ )
UpperCamelCase__ = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(snake_case_ ) )
self.assertEqual(model.config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
UpperCamelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCamelCase__ = model(snake_case_ )[0]
UpperCamelCase__ = [1, 6, 768]
self.assertEqual(output.shape , snake_case_ )
UpperCamelCase__ = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1E-4 )
| 20 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCamelCase ( _a , unittest.TestCase ):
a : Dict =DebertaTokenizer
a : Optional[int] =True
a : Optional[Any] =DebertaTokenizerFast
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase__ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'[UNK]',
]
UpperCamelCase__ = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
UpperCamelCase__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
UpperCamelCase__ = {'unk_token': '[UNK]'}
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(snake_case_ ) )
def SCREAMING_SNAKE_CASE__ ( self , **snake_case_ ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Optional[int]:
UpperCamelCase__ = 'lower newer'
UpperCamelCase__ = 'lower newer'
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = 'lower newer'
UpperCamelCase__ = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
UpperCamelCase__ = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase__ = tokens + [tokenizer.unk_token]
UpperCamelCase__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , snake_case_ )
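        # '[UNK]' is the last entry of the toy vocab above, hence id 19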
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = tokenizer('Hello' , 'World' )
UpperCamelCase__ = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['token_type_ids'] , snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = self.tokenizer_class.from_pretrained('microsoft/deberta-base' )
UpperCamelCase__ = tokenizer.encode('sequence builders' , add_special_tokens=snake_case_ )
UpperCamelCase__ = tokenizer.encode('multi-sequence build' , add_special_tokens=snake_case_ )
UpperCamelCase__ = tokenizer.encode(
'sequence builders' , add_special_tokens=snake_case_ , add_prefix_space=snake_case_ )
UpperCamelCase__ = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=snake_case_ , add_prefix_space=snake_case_ )
UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(snake_case_ )
UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
UpperCamelCase__ = tokenizer_class.from_pretrained('microsoft/deberta-base' )
UpperCamelCase__ = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
UpperCamelCase__ = tokenizer(snake_case_ , padding=snake_case_ )
UpperCamelCase__ = [tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ ) for seq in encoding['input_ids']]
# fmt: off
UpperCamelCase__ = {
'input_ids': [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
UpperCamelCase__ = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
self.assertDictEqual(encoding.data , snake_case_ )
for expected, decoded in zip(snake_case_ , snake_case_ ):
self.assertEqual(snake_case_ , snake_case_ )
| 712 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 1_00_00_00 , SCREAMING_SNAKE_CASE = 10 ) -> int:
"""simple docstring"""
UpperCamelCase__ = defaultdict(SCREAMING_SNAKE_CASE )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
UpperCamelCase__ = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
UpperCamelCase__ = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(SCREAMING_SNAKE_CASE , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 | 0 |
import argparse
from collections import defaultdict
import yaml
A__ : Optional[Any]= """docs/source/en/_toctree.yml"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ = defaultdict(SCREAMING_SNAKE_CASE )
for doc in model_doc:
counts[doc["local"]] += 1
UpperCamelCase__ = [key for key, value in counts.items() if value > 1]
UpperCamelCase__ = []
for duplicate_key in duplicates:
UpperCamelCase__ = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
if len(SCREAMING_SNAKE_CASE ) > 1:
raise ValueError(
F'{duplicate_key} is present several times in the documentation table of content at '
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
    # Add non-duplicate keys
new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
# Sort
return sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : s["title"].lower() )
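# e.g. two entries sharing local="bert" with titles "BERT" and "Bert" raise a
# ValueError; identical duplicates collapse to one entry, and the cleaned list
# is sorted case-insensitively by title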
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]:
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE , encoding='utf-8' ) as f:
UpperCamelCase__ = yaml.safe_load(f.read() )
# Get to the API doc
UpperCamelCase__ = 0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCamelCase__ = content[api_idx]['sections']
# Then to the model doc
UpperCamelCase__ = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
UpperCamelCase__ = api_doc[model_idx]['sections']
UpperCamelCase__ = [(idx, section) for idx, section in enumerate(SCREAMING_SNAKE_CASE ) if 'sections' in section]
UpperCamelCase__ = False
for idx, modality_doc in modalities_docs:
UpperCamelCase__ = modality_doc['sections']
UpperCamelCase__ = clean_model_doc_toc(SCREAMING_SNAKE_CASE )
if old_modality_doc != new_modality_doc:
UpperCamelCase__ = True
if overwrite:
UpperCamelCase__ = new_modality_doc
if diff:
if overwrite:
UpperCamelCase__ = model_doc
UpperCamelCase__ = api_doc
with open(SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(SCREAMING_SNAKE_CASE , allow_unicode=SCREAMING_SNAKE_CASE ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
A__ : Union[str, Any]= argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
A__ : Union[str, Any]= parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 713 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
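        # With the defaults above (assumed arithmetic): image_size=30 and
        # patch_size=2 give (30 // 2) ** 2 = 225 patches, so seq_length is 226.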
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, )
        return config, pixel_values, labels
    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values

        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)

        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
| 20 | 0 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    """simple docstring"""
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
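# Hand-worked example (input format as read below: adjacency_list[u] holds
# [v, weight] pairs). For the weighted triangle 0-1 (1), 1-2 (2), 0-2 (3),
# Prim's algorithm picks the two cheapest edges:
#
#   >>> graph = {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 2]], 2: [[0, 3], [1, 2]]}
#   >>> prisms_algorithm(graph)
#   [(0, 1), (1, 2)]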
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
| 714 |
"""simple docstring"""
import sys
from collections import defaultdict
class __lowerCamelCase :
def __init__( self ) -> Tuple:
UpperCamelCase__ = []
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
return self.node_position[vertex]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> Optional[int]:
UpperCamelCase__ = pos
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
UpperCamelCase__ = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
UpperCamelCase__ = 2 * start + 1
else:
UpperCamelCase__ = 2 * start + 2
if heap[smallest_child] < heap[start]:
UpperCamelCase__ , UpperCamelCase__ = heap[smallest_child], positions[smallest_child]
UpperCamelCase__ , UpperCamelCase__ = (
heap[start],
positions[start],
)
UpperCamelCase__ , UpperCamelCase__ = temp, tempa
UpperCamelCase__ = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , snake_case_ )
self.top_to_bottom(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Any:
UpperCamelCase__ = position[index]
while index != 0:
UpperCamelCase__ = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
UpperCamelCase__ = heap[parent]
UpperCamelCase__ = position[parent]
self.set_position(position[parent] , snake_case_ )
else:
UpperCamelCase__ = val
UpperCamelCase__ = temp
self.set_position(snake_case_ , snake_case_ )
break
UpperCamelCase__ = parent
else:
UpperCamelCase__ = val
UpperCamelCase__ = temp
self.set_position(snake_case_ , 0 )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> Any:
UpperCamelCase__ = len(snake_case_ ) // 2 - 1
for i in range(snake_case_ , -1 , -1 ):
self.top_to_bottom(snake_case_ , snake_case_ , len(snake_case_ ) , snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ ) -> List[Any]:
UpperCamelCase__ = positions[0]
UpperCamelCase__ = sys.maxsize
self.top_to_bottom(snake_case_ , 0 , len(snake_case_ ) , snake_case_ )
return temp
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ = Heap()
UpperCamelCase__ = [0] * len(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = [-1] * len(SCREAMING_SNAKE_CASE ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
UpperCamelCase__ = [] # Heap of Distance of vertices from their neighboring vertex
UpperCamelCase__ = []
for vertex in range(len(SCREAMING_SNAKE_CASE ) ):
distance_tv.append(sys.maxsize )
positions.append(SCREAMING_SNAKE_CASE )
heap.node_position.append(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = []
UpperCamelCase__ = 1
UpperCamelCase__ = sys.maxsize
for neighbor, distance in adjacency_list[0]:
UpperCamelCase__ = 0
UpperCamelCase__ = distance
heap.heapify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for _ in range(1 , len(SCREAMING_SNAKE_CASE ) ):
UpperCamelCase__ = heap.delete_minimum(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
UpperCamelCase__ = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(SCREAMING_SNAKE_CASE )]
):
UpperCamelCase__ = distance
heap.bottom_to_top(
SCREAMING_SNAKE_CASE , heap.get_position(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
A__ : Dict= int(input("""Enter number of edges: """).strip())
A__ : Dict= defaultdict(list)
for _ in range(edges_number):
A__ : Dict= [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 20 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
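# What the assertions above check (assumed example): with size_divisor=32 the
# processor resizes so both spatial dimensions round down to a multiple of 32,
# e.g. a 33x47 input becomes 32x32, hence shape[-1] % 32 == 0 and
# shape[-2] % 32 == 0.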
| 715 |
"""simple docstring"""
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr=None, size=None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr):
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index):
        return index + (index & (-index))

    @staticmethod
    def prev(index):
        return index - (index & (-index))

    def add(self, index, value):
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value):
        self.add(index, value - self.get(index))

    def prefix(self, right):
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right):
        return self.prefix(right) - self.prefix(left)

    def get(self, index):
        return self.query(index, index + 1)

    def rank_query(self, value):
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
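    # Usage sketch (hand-checked against the methods above):
    #
    #   >>> f = FenwickTree(arr=[1, 2, 3])
    #   >>> f.prefix(3)       # 1 + 2 + 3
    #   6
    #   >>> f.add(1, 10)      # arr is now effectively [1, 12, 3]
    #   >>> f.query(1, 3)     # sum over the half-open range [1, 3)
    #   15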
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 | 0 |
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
A__ : List[str]= None
try:
import msvcrt
except ImportError:
A__ : Dict= None
try:
import fcntl
except ImportError:
A__ : Dict= None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
A__ : str= [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
__version__ = "3.0.12"


_logger = None


def logger():
    """simple docstring"""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)

        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self._timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
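    # The proxy object returned above supports both usage styles: plain
    # `lock.acquire()` / `lock.release()` pairs, and
    # `with lock.acquire(timeout=10): ...`, which releases when the block exits.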
    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None
    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None
    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
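    # Illustration (assumed values): with max_length=16, a basename like
    # "averylongmodelname.lock" is shortened to a prefix plus "..." plus the
    # hash of the original name plus ".lock", keeping the file in the same
    # directory while staying under the filesystem's filename limit.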
class WindowsFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        # https://github.com/benediktschmitt/py-filelock/issues/31
        # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
| 716 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 20 | 0 |
"""simple docstring"""
def is_pentagonal(n: int) -> bool:
    """simple docstring"""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
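# Sanity example (hand-checked): P(4) = 4 * (3 * 4 - 1) // 2 = 22, and
# 1 + 24 * 22 = 529 = 23 ** 2, so (1 + 23) / 6 == 4.0 exactly and
# is_pentagonal(22) is True, while is_pentagonal(23) is False.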
def solution(limit: int = 5000) -> int:
    """simple docstring"""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 717 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : str= logging.get_logger(__name__)
A__ : List[Any]= {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
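# Minimal usage sketch (assumes the base OnnxConfig constructor accepts the
# model config as its first positional argument): the export declares a single
# 4D "pixel_values" input with dynamic batch/channel/height/width axes.
#
#   >>> onnx_config = SegformerOnnxConfig(SegformerConfig())
#   >>> list(onnx_config.inputs.keys())
#   ['pixel_values']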
| 20 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
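# Note on the arithmetic above (assumed interpretation): the reference score
# is a summed negative log-likelihood, so the mean per-token loss is
# multiplied by the label length and negated before being compared against
# EXPECTED_SCORE.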
| 718 |
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    """simple docstring"""
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
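# Assumed CLI usage once the subcommand is registered with the transformers-cli
# entry point (illustrative invocation only):
#
#   transformers-cli download --cache-dir /tmp/models bert-base-uncased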
| 20 | 0 |
"""simple docstring"""
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
# fmt: off
UpperCamelCase__ = {'input_ids': [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase__, model_name="google/bert_for_seq_generation_L-24_bbc_encoder", revision="c817d1fd1be2ffa69431227a1fe320544943d4db", )
| 719 |
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
pass
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
UpperCamelCase__ = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
UpperCamelCase__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ )[0]
# compare the actual values for a slice.
UpperCamelCase__ = torch.tensor(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case_ , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
| 20 | 0 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
A__ : List[Any]= logging.get_logger(__name__)
A__ : Tuple= OrderedDict(
[
("""align""", """EfficientNetImageProcessor"""),
("""beit""", """BeitImageProcessor"""),
("""bit""", """BitImageProcessor"""),
("""blip""", """BlipImageProcessor"""),
("""blip-2""", """BlipImageProcessor"""),
("""bridgetower""", """BridgeTowerImageProcessor"""),
("""chinese_clip""", """ChineseCLIPImageProcessor"""),
("""clip""", """CLIPImageProcessor"""),
("""clipseg""", """ViTImageProcessor"""),
("""conditional_detr""", """ConditionalDetrImageProcessor"""),
("""convnext""", """ConvNextImageProcessor"""),
("""convnextv2""", """ConvNextImageProcessor"""),
("""cvt""", """ConvNextImageProcessor"""),
("""data2vec-vision""", """BeitImageProcessor"""),
("""deformable_detr""", """DeformableDetrImageProcessor"""),
("""deit""", """DeiTImageProcessor"""),
("""deta""", """DetaImageProcessor"""),
("""detr""", """DetrImageProcessor"""),
("""dinat""", """ViTImageProcessor"""),
("""donut-swin""", """DonutImageProcessor"""),
("""dpt""", """DPTImageProcessor"""),
("""efficientformer""", """EfficientFormerImageProcessor"""),
("""efficientnet""", """EfficientNetImageProcessor"""),
("""flava""", """FlavaImageProcessor"""),
("""focalnet""", """BitImageProcessor"""),
("""git""", """CLIPImageProcessor"""),
("""glpn""", """GLPNImageProcessor"""),
("""groupvit""", """CLIPImageProcessor"""),
("""imagegpt""", """ImageGPTImageProcessor"""),
("""instructblip""", """BlipImageProcessor"""),
("""layoutlmv2""", """LayoutLMv2ImageProcessor"""),
("""layoutlmv3""", """LayoutLMv3ImageProcessor"""),
("""levit""", """LevitImageProcessor"""),
("""mask2former""", """Mask2FormerImageProcessor"""),
("""maskformer""", """MaskFormerImageProcessor"""),
("""mgp-str""", """ViTImageProcessor"""),
("""mobilenet_v1""", """MobileNetV1ImageProcessor"""),
("""mobilenet_v2""", """MobileNetV2ImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevitv2""", """MobileViTImageProcessor"""),
("""nat""", """ViTImageProcessor"""),
("""oneformer""", """OneFormerImageProcessor"""),
("""owlvit""", """OwlViTImageProcessor"""),
("""perceiver""", """PerceiverImageProcessor"""),
("""pix2struct""", """Pix2StructImageProcessor"""),
("""poolformer""", """PoolFormerImageProcessor"""),
("""regnet""", """ConvNextImageProcessor"""),
("""resnet""", """ConvNextImageProcessor"""),
("""sam""", """SamImageProcessor"""),
("""segformer""", """SegformerImageProcessor"""),
("""swiftformer""", """ViTImageProcessor"""),
("""swin""", """ViTImageProcessor"""),
("""swin2sr""", """Swin2SRImageProcessor"""),
("""swinv2""", """ViTImageProcessor"""),
("""table-transformer""", """DetrImageProcessor"""),
("""timesformer""", """VideoMAEImageProcessor"""),
("""tvlt""", """TvltImageProcessor"""),
("""upernet""", """SegformerImageProcessor"""),
("""van""", """ConvNextImageProcessor"""),
("""videomae""", """VideoMAEImageProcessor"""),
("""vilt""", """ViltImageProcessor"""),
("""vit""", """ViTImageProcessor"""),
("""vit_hybrid""", """ViTHybridImageProcessor"""),
("""vit_mae""", """ViTImageProcessor"""),
("""vit_msn""", """ViTImageProcessor"""),
("""xclip""", """CLIPImageProcessor"""),
("""yolos""", """YolosImageProcessor"""),
]
)
A__ : Optional[Any]= _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
UpperCamelCase__ = model_type_to_module_name(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = importlib.import_module(F'.{module_name}' , 'transformers.models' )
try:
return getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(SCREAMING_SNAKE_CASE , '__name__' , SCREAMING_SNAKE_CASE ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
UpperCamelCase__ = importlib.import_module('transformers' )
if hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return None
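# Illustrative sketch of the lookup above (added for clarity; example values taken from
# IMAGE_PROCESSOR_MAPPING_NAMES, and the vision dependencies are assumed to be installed):
#   "BeitImageProcessor" -> model type "beit" -> importlib.import_module(".beit", "transformers.models")
#   -> getattr(module, "BeitImageProcessor") -> the concrete image processor class.
# When the dependency is missing, the function instead returns the dummy object found on the main
# `transformers` module, which raises an informative error on use.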
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , **SCREAMING_SNAKE_CASE , ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ = get_file_from_repo(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , force_download=SCREAMING_SNAKE_CASE , resume_download=SCREAMING_SNAKE_CASE , proxies=SCREAMING_SNAKE_CASE , use_auth_token=SCREAMING_SNAKE_CASE , revision=SCREAMING_SNAKE_CASE , local_files_only=SCREAMING_SNAKE_CASE , )
if resolved_config_file is None:
logger.info(
'Could not locate the image processor configuration file, will try to use the model config instead.' )
return {}
with open(SCREAMING_SNAKE_CASE , encoding='utf-8' ) as reader:
return json.load(SCREAMING_SNAKE_CASE )
class __lowerCamelCase :
def __init__( self ) -> Any:
raise EnvironmentError(
'AutoImageProcessor is designed to be instantiated '
'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( cls , snake_case_ , **snake_case_ ) -> Optional[int]:
UpperCamelCase__ = kwargs.pop('config' , snake_case_ )
UpperCamelCase__ = kwargs.pop('trust_remote_code' , snake_case_ )
UpperCamelCase__ = True
UpperCamelCase__ , UpperCamelCase__ = ImageProcessingMixin.get_image_processor_dict(snake_case_ , **snake_case_ )
UpperCamelCase__ = config_dict.get('image_processor_type' , snake_case_ )
UpperCamelCase__ = None
if "AutoImageProcessor" in config_dict.get('auto_map' , {} ):
UpperCamelCase__ = config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
UpperCamelCase__ = config_dict.pop('feature_extractor_type' , snake_case_ )
if feature_extractor_class is not None:
logger.warning(
'Could not find image processor class in the image processor config or the model config. Loading'
' based on pattern matching with the model\'s feature extractor configuration.' )
UpperCamelCase__ = feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor' )
if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
UpperCamelCase__ = config_dict['auto_map']['AutoFeatureExtractor']
UpperCamelCase__ = feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor' )
logger.warning(
'Could not find image processor auto map in the image processor config or the model config.'
' Loading based on pattern matching with the model\'s feature extractor configuration.' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(snake_case_ , snake_case_ ):
UpperCamelCase__ = AutoConfig.from_pretrained(snake_case_ , **snake_case_ )
            # It could be in `config.image_processor_type`
UpperCamelCase__ = getattr(snake_case_ , 'image_processor_type' , snake_case_ )
if hasattr(snake_case_ , 'auto_map' ) and "AutoImageProcessor" in config.auto_map:
UpperCamelCase__ = config.auto_map['AutoImageProcessor']
if image_processor_class is not None:
UpperCamelCase__ = image_processor_class_from_name(snake_case_ )
UpperCamelCase__ = image_processor_auto_map is not None
UpperCamelCase__ = image_processor_class is not None or type(snake_case_ ) in IMAGE_PROCESSOR_MAPPING
UpperCamelCase__ = resolve_trust_remote_code(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
if has_remote_code and trust_remote_code:
UpperCamelCase__ = get_class_from_dynamic_module(
snake_case_ , snake_case_ , **snake_case_ )
UpperCamelCase__ = kwargs.pop('code_revision' , snake_case_ )
if os.path.isdir(snake_case_ ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(snake_case_ , **snake_case_ )
elif image_processor_class is not None:
return image_processor_class.from_dict(snake_case_ , **snake_case_ )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(snake_case_ ) in IMAGE_PROCESSOR_MAPPING:
UpperCamelCase__ = IMAGE_PROCESSOR_MAPPING[type(snake_case_ )]
return image_processor_class.from_dict(snake_case_ , **snake_case_ )
raise ValueError(
F'Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '
F'`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '
F'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}' )
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_ ) -> Any:
IMAGE_PROCESSOR_MAPPING.register(snake_case_ , snake_case_ )
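# Hypothetical usage sketch (added for clarity; `CustomConfig` and `CustomImageProcessor` are
# assumed names, not part of this file). The static `register` method above backs the public
# `AutoImageProcessor.register` API of the original library:
#   AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
#   processor = AutoImageProcessor.from_pretrained("path/containing/custom/config")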
| 720 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ = SwinConfig()
UpperCamelCase__ = swin_name.split('_' )
UpperCamelCase__ = name_split[1]
UpperCamelCase__ = int(name_split[4] )
UpperCamelCase__ = int(name_split[3][-1] )
if model_size == "tiny":
UpperCamelCase__ = 96
UpperCamelCase__ = (2, 2, 6, 2)
UpperCamelCase__ = (3, 6, 12, 24)
elif model_size == "small":
UpperCamelCase__ = 96
UpperCamelCase__ = (2, 2, 18, 2)
UpperCamelCase__ = (3, 6, 12, 24)
elif model_size == "base":
UpperCamelCase__ = 1_28
UpperCamelCase__ = (2, 2, 18, 2)
UpperCamelCase__ = (4, 8, 16, 32)
else:
UpperCamelCase__ = 1_92
UpperCamelCase__ = (2, 2, 18, 2)
UpperCamelCase__ = (6, 12, 24, 48)
if "in22k" in swin_name:
UpperCamelCase__ = 2_18_41
else:
UpperCamelCase__ = 10_00
UpperCamelCase__ = 'huggingface/label-files'
UpperCamelCase__ = 'imagenet-1k-id2label.json'
UpperCamelCase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
UpperCamelCase__ = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
UpperCamelCase__ = idalabel
UpperCamelCase__ = {v: k for k, v in idalabel.items()}
UpperCamelCase__ = img_size
UpperCamelCase__ = num_classes
UpperCamelCase__ = embed_dim
UpperCamelCase__ = depths
UpperCamelCase__ = num_heads
UpperCamelCase__ = window_size
return config
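# Worked example for the parsing above (added for clarity): for the checkpoint name
# "swin_base_patch4_window7_224", split("_") yields ["swin", "base", "patch4", "window7", "224"],
# so model_size="base", img_size=224 and window_size=7, and the config gets embed_dim=128,
# depths=(2, 2, 18, 2) and num_heads=(4, 8, 16, 32).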
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
if "patch_embed.proj" in name:
UpperCamelCase__ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
UpperCamelCase__ = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
UpperCamelCase__ = 'encoder.' + name
if "attn.proj" in name:
UpperCamelCase__ = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
UpperCamelCase__ = name.replace('attn' , 'attention.self' )
if "norm1" in name:
UpperCamelCase__ = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
UpperCamelCase__ = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
UpperCamelCase__ = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
UpperCamelCase__ = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
UpperCamelCase__ = 'layernorm.weight'
if name == "norm.bias":
UpperCamelCase__ = 'layernorm.bias'
if "head" in name:
UpperCamelCase__ = name.replace('head' , 'classifier' )
else:
UpperCamelCase__ = 'swin.' + name
return name
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCamelCase__ = orig_state_dict.pop(SCREAMING_SNAKE_CASE )
if "mask" in key:
continue
elif "qkv" in key:
UpperCamelCase__ = key.split('.' )
UpperCamelCase__ = int(key_split[1] )
UpperCamelCase__ = int(key_split[3] )
UpperCamelCase__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
UpperCamelCase__ = val[:dim, :]
UpperCamelCase__ = val[
dim : dim * 2, :
]
UpperCamelCase__ = val[-dim:, :]
else:
UpperCamelCase__ = val[
:dim
]
UpperCamelCase__ = val[
dim : dim * 2
]
UpperCamelCase__ = val[
-dim:
]
else:
UpperCamelCase__ = val
return orig_state_dict
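# Note on the qkv handling above (added for clarity): timm stores the query/key/value projections
# of each attention block as a single fused tensor of shape (3 * dim, dim). The slices val[:dim],
# val[dim : dim * 2] and val[-dim:] therefore recover the separate query, key and value weights
# (and, in the one-dimensional branch, the corresponding biases) expected by the Hugging Face
# Swin implementation.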
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
UpperCamelCase__ = timm.create_model(SCREAMING_SNAKE_CASE , pretrained=SCREAMING_SNAKE_CASE )
timm_model.eval()
UpperCamelCase__ = get_swin_config(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = SwinForImageClassification(SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase__ = convert_state_dict(timm_model.state_dict() , SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
UpperCamelCase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCamelCase__ = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
UpperCamelCase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
UpperCamelCase__ = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='pt' )
UpperCamelCase__ = timm_model(inputs['pixel_values'] )
UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 )
print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
A__ : Optional[Any]= argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
A__ : Tuple= parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 20 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
A__ : List[str]= logging.getLogger(__name__)
@dataclass
class __lowerCamelCase :
a : str
a : List[str]
a : Optional[List[str]]
@dataclass
class __lowerCamelCase :
a : List[int]
a : List[int]
a : Optional[List[int]] =None
a : Optional[List[int]] =None
class __lowerCamelCase ( _a ):
a : Optional[Any] ="""train"""
a : List[Any] ="""dev"""
a : Optional[Any] ="""test"""
class __lowerCamelCase :
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_ ) -> List[InputExample]:
raise NotImplementedError
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> List[str]:
raise NotImplementedError
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=False , snake_case_="[CLS]" , snake_case_=1 , snake_case_="[SEP]" , snake_case_=False , snake_case_=False , snake_case_=0 , snake_case_=0 , snake_case_=-100 , snake_case_=0 , snake_case_=True , ) -> List[InputFeatures]:
UpperCamelCase__ = {label: i for i, label in enumerate(snake_case_ )}
UpperCamelCase__ = []
for ex_index, example in enumerate(snake_case_ ):
if ex_index % 1_0000 == 0:
logger.info('Writing example %d of %d' , snake_case_ , len(snake_case_ ) )
UpperCamelCase__ = []
UpperCamelCase__ = []
for word, label in zip(example.words , example.labels ):
UpperCamelCase__ = tokenizer.tokenize(snake_case_ )
                # bert-base-multilingual-cased sometimes outputs nothing ([]) when calling tokenize with just a space.
if len(snake_case_ ) > 0:
tokens.extend(snake_case_ )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(snake_case_ ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
UpperCamelCase__ = tokenizer.num_special_tokens_to_add()
if len(snake_case_ ) > max_seq_length - special_tokens_count:
UpperCamelCase__ = tokens[: (max_seq_length - special_tokens_count)]
UpperCamelCase__ = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
UpperCamelCase__ = [sequence_a_segment_id] * len(snake_case_ )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
UpperCamelCase__ = [cls_token] + tokens
UpperCamelCase__ = [pad_token_label_id] + label_ids
UpperCamelCase__ = [cls_token_segment_id] + segment_ids
UpperCamelCase__ = tokenizer.convert_tokens_to_ids(snake_case_ )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
UpperCamelCase__ = [1 if mask_padding_with_zero else 0] * len(snake_case_ )
# Zero-pad up to the sequence length.
UpperCamelCase__ = max_seq_length - len(snake_case_ )
if pad_on_left:
UpperCamelCase__ = ([pad_token] * padding_length) + input_ids
UpperCamelCase__ = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
UpperCamelCase__ = ([pad_token_segment_id] * padding_length) + segment_ids
UpperCamelCase__ = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(snake_case_ ) == max_seq_length
assert len(snake_case_ ) == max_seq_length
assert len(snake_case_ ) == max_seq_length
assert len(snake_case_ ) == max_seq_length
if ex_index < 5:
logger.info('*** Example ***' )
logger.info('guid: %s' , example.guid )
logger.info('tokens: %s' , ' '.join([str(snake_case_ ) for x in tokens] ) )
logger.info('input_ids: %s' , ' '.join([str(snake_case_ ) for x in input_ids] ) )
logger.info('input_mask: %s' , ' '.join([str(snake_case_ ) for x in input_mask] ) )
logger.info('segment_ids: %s' , ' '.join([str(snake_case_ ) for x in segment_ids] ) )
logger.info('label_ids: %s' , ' '.join([str(snake_case_ ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
UpperCamelCase__ = None
features.append(
InputFeatures(
input_ids=snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , label_ids=snake_case_ ) )
return features
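# Illustrative sketch of the subword labelling above (added for clarity; the token strings are
# assumed examples): a word tokenized as ["jack", "##son", "##ville"] with label "B-LOC"
# contributes [label_map["B-LOC"], pad_token_label_id, pad_token_label_id] to label_ids, so the
# loss is computed only on the first subword of each word.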
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class __lowerCamelCase ( _a ):
a : List[InputFeatures]
a : int =nn.CrossEntropyLoss().ignore_index
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = None , snake_case_=False , snake_case_ = Split.train , ) -> Tuple:
# Load data features from cache or dataset file
UpperCamelCase__ = os.path.join(
snake_case_ , 'cached_{}_{}_{}'.format(mode.value , tokenizer.__class__.__name__ , str(snake_case_ ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCamelCase__ = cached_features_file + '.lock'
with FileLock(snake_case_ ):
if os.path.exists(snake_case_ ) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}' )
UpperCamelCase__ = torch.load(snake_case_ )
else:
logger.info(F'Creating features from dataset file at {data_dir}' )
UpperCamelCase__ = token_classification_task.read_examples_from_file(snake_case_ , snake_case_ )
# TODO clean up all this to leverage built-in features of tokenizers
UpperCamelCase__ = token_classification_task.convert_examples_to_features(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , cls_token_at_end=bool(model_type in ['xlnet'] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['xlnet'] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=snake_case_ , pad_on_left=bool(tokenizer.padding_side == 'left' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(F'Saving features into cached file {cached_features_file}' )
torch.save(self.features , snake_case_ )
def __len__( self ) -> int:
return len(self.features )
def __getitem__( self , snake_case_ ) -> InputFeatures:
return self.features[i]
if is_tf_available():
import tensorflow as tf
class __lowerCamelCase :
a : List[InputFeatures]
a : int =-1_0_0
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = None , snake_case_=False , snake_case_ = Split.train , ) -> List[Any]:
UpperCamelCase__ = token_classification_task.read_examples_from_file(snake_case_ , snake_case_ )
# TODO clean up all this to leverage built-in features of tokenizers
UpperCamelCase__ = token_classification_task.convert_examples_to_features(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , cls_token_at_end=bool(model_type in ['xlnet'] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['xlnet'] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=snake_case_ , pad_on_left=bool(tokenizer.padding_side == 'left' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
UpperCamelCase__ = tf.data.Dataset.from_generator(
snake_case_ , ({'input_ids': tf.intaa, 'attention_mask': tf.intaa}, tf.intaa) , (
{'input_ids': tf.TensorShape([None] ), 'attention_mask': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
UpperCamelCase__ = tf.data.Dataset.from_generator(
snake_case_ , ({'input_ids': tf.intaa, 'attention_mask': tf.intaa, 'token_type_ids': tf.intaa}, tf.intaa) , (
{
'input_ids': tf.TensorShape([None] ),
'attention_mask': tf.TensorShape([None] ),
'token_type_ids': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self ) -> List[Any]:
return len(self.features )
def __getitem__( self , snake_case_ ) -> InputFeatures:
return self.features[i]
| 721 |
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
UpperCamelCase__ = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 50_00 ) -> int:
"""simple docstring"""
UpperCamelCase__ = [(i * (3 * i - 1)) // 2 for i in range(1 , SCREAMING_SNAKE_CASE )]
for i, pentagonal_i in enumerate(SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ):
UpperCamelCase__ = pentagonal_nums[j]
UpperCamelCase__ = pentagonal_i + pentagonal_j
UpperCamelCase__ = pentagonal_j - pentagonal_i
if is_pentagonal(SCREAMING_SNAKE_CASE ) and is_pentagonal(SCREAMING_SNAKE_CASE ):
return b
return -1
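# Worked check (added for clarity): pentagonal numbers follow P(n) = n * (3n - 1) / 2, so solving
# P(n) = x for n gives n = (1 + sqrt(1 + 24 * x)) / 6; is_pentagonal therefore tests whether that
# expression is an integer. For example x = 22 = P(4): the root is sqrt(529) = 23 and
# (1 + 23) / 6 = 4, so the check passes.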
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 | 0 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
# General docstring
__lowerCAmelCase : List[str] = '''MobileNetV1Config'''
# Base docstring
__lowerCAmelCase : Union[str, Any] = '''google/mobilenet_v1_1.0_224'''
__lowerCAmelCase : int = [1, 1024, 7, 7]
# Image classification docstring
__lowerCAmelCase : Union[str, Any] = '''google/mobilenet_v1_1.0_224'''
__lowerCAmelCase : Dict = '''tabby, tabby cat'''
__lowerCAmelCase : List[str] = [
'''google/mobilenet_v1_1.0_224''',
'''google/mobilenet_v1_0.75_192''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any]=None ):
'''simple docstring'''
snake_case_ : Tuple = {}
if isinstance(__UpperCamelCase , __UpperCamelCase ):
snake_case_ : List[Any] = model.mobilenet_va
else:
snake_case_ : Union[str, Any] = model
snake_case_ : Union[str, Any] = """MobilenetV1/Conv2d_0/"""
snake_case_ : Dict = backbone.conv_stem.convolution.weight
snake_case_ : Optional[int] = backbone.conv_stem.normalization.bias
snake_case_ : Any = backbone.conv_stem.normalization.weight
snake_case_ : Optional[Any] = backbone.conv_stem.normalization.running_mean
snake_case_ : Any = backbone.conv_stem.normalization.running_var
for i in range(1_3 ):
snake_case_ : List[str] = i + 1
snake_case_ : Optional[int] = i * 2
snake_case_ : int = backbone.layer[pt_index]
snake_case_ : Any = F'MobilenetV1/Conv2d_{tf_index}_depthwise/'
snake_case_ : str = pointer.convolution.weight
snake_case_ : str = pointer.normalization.bias
snake_case_ : Optional[int] = pointer.normalization.weight
snake_case_ : Dict = pointer.normalization.running_mean
snake_case_ : Dict = pointer.normalization.running_var
snake_case_ : Optional[Any] = backbone.layer[pt_index + 1]
snake_case_ : List[str] = F'MobilenetV1/Conv2d_{tf_index}_pointwise/'
snake_case_ : List[str] = pointer.convolution.weight
snake_case_ : Tuple = pointer.normalization.bias
snake_case_ : List[str] = pointer.normalization.weight
snake_case_ : str = pointer.normalization.running_mean
snake_case_ : Union[str, Any] = pointer.normalization.running_var
if isinstance(__UpperCamelCase , __UpperCamelCase ):
snake_case_ : Optional[int] = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
snake_case_ : str = model.classifier.weight
snake_case_ : Any = model.classifier.bias
return tf_to_pt_map
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"""Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """
"""https://www.tensorflow.org/install/ for installation instructions.""" )
raise
# Load weights from TF model
snake_case_ : Optional[int] = tf.train.list_variables(__UpperCamelCase )
snake_case_ : Dict = {}
for name, shape in init_vars:
logger.info(F'Loading TF weight {name} with shape {shape}' )
snake_case_ : Any = tf.train.load_variable(__UpperCamelCase , __UpperCamelCase )
snake_case_ : str = array
# Build TF to PyTorch weights loading map
snake_case_ : str = _build_tf_to_pytorch_map(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
for name, pointer in tf_to_pt_map.items():
logger.info(F'Importing {name}' )
if name not in tf_weights:
logger.info(F'{name} not in tf pre-trained weights, skipping' )
continue
snake_case_ : Dict = tf_weights[name]
if "depthwise_weights" in name:
logger.info("""Transposing depthwise""" )
snake_case_ : Optional[int] = np.transpose(__UpperCamelCase , (2, 3, 0, 1) )
elif "weights" in name:
logger.info("""Transposing""" )
if len(pointer.shape ) == 2: # copying into linear layer
snake_case_ : Tuple = array.squeeze().transpose()
else:
snake_case_ : Optional[int] = np.transpose(__UpperCamelCase , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F'Pointer shape {pointer.shape} and array shape {array.shape} mismatched' )
logger.info(F'Initialize PyTorch weight {name} {array.shape}' )
snake_case_ : Optional[Any] = torch.from_numpy(__UpperCamelCase )
tf_weights.pop(__UpperCamelCase , __UpperCamelCase )
tf_weights.pop(name + """/RMSProp""" , __UpperCamelCase )
tf_weights.pop(name + """/RMSProp_1""" , __UpperCamelCase )
tf_weights.pop(name + """/ExponentialMovingAverage""" , __UpperCamelCase )
logger.info(F'Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}' )
return model
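# Note on the transposes above (added for clarity): TensorFlow stores depthwise kernels as
# (H, W, in_channels, depth_multiplier) and regular kernels as (H, W, in_channels, out_channels),
# while PyTorch expects (out_channels, in_channels // groups, H, W). Hence np.transpose with
# (2, 3, 0, 1) for depthwise weights, (3, 2, 0, 1) for pointwise/regular weights, and a plain
# squeeze().transpose() when the target is a linear layer.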
def __lowerCAmelCase ( __UpperCamelCase : torch.Tensor , __UpperCamelCase : nn.Convad ):
'''simple docstring'''
snake_case_ , snake_case_ : Optional[int] = features.shape[-2:]
snake_case_ , snake_case_ : int = conv_layer.stride
snake_case_ , snake_case_ : Dict = conv_layer.kernel_size
if in_height % stride_height == 0:
snake_case_ : Any = max(kernel_height - stride_height , 0 )
else:
snake_case_ : Any = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
snake_case_ : Any = max(kernel_width - stride_width , 0 )
else:
snake_case_ : Optional[int] = max(kernel_width - (in_width % stride_width) , 0 )
snake_case_ : int = pad_along_width // 2
snake_case_ : Optional[Any] = pad_along_width - pad_left
snake_case_ : Any = pad_along_height // 2
snake_case_ : List[str] = pad_along_height - pad_top
snake_case_ : Dict = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(__UpperCamelCase , __UpperCamelCase , """constant""" , 0.0 )
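# Worked example for the "SAME" padding computed above (added for clarity): with a 3x3 kernel,
# stride 2 and an input height of 224, in_height % stride_height == 0, so
# pad_along_height = max(3 - 2, 0) = 1, giving pad_top = 0 and pad_bottom = 1. This reproduces
# TensorFlow's asymmetric "SAME" padding, which the symmetric padding of a plain PyTorch Conv2d
# cannot express.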
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = 1 , _lowercase = 1 , _lowercase = False , _lowercase = True , _lowercase = True , ) -> None:
'''simple docstring'''
super().__init__()
snake_case_ : Optional[int] = config
if in_channels % groups != 0:
raise ValueError(f'Input channels ({in_channels}) are not divisible by {groups} groups.' )
if out_channels % groups != 0:
raise ValueError(f'Output channels ({out_channels}) are not divisible by {groups} groups.' )
snake_case_ : Dict = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
snake_case_ : Dict = nn.Convad(
in_channels=_lowercase , out_channels=_lowercase , kernel_size=_lowercase , stride=_lowercase , padding=_lowercase , groups=_lowercase , bias=_lowercase , padding_mode="""zeros""" , )
if use_normalization:
snake_case_ : Tuple = nn.BatchNormad(
num_features=_lowercase , eps=config.layer_norm_eps , momentum=0.9997 , affine=_lowercase , track_running_stats=_lowercase , )
else:
snake_case_ : Union[str, Any] = None
if use_activation:
if isinstance(_lowercase , _lowercase ):
snake_case_ : str = ACTaFN[use_activation]
elif isinstance(config.hidden_act , _lowercase ):
snake_case_ : Optional[int] = ACTaFN[config.hidden_act]
else:
snake_case_ : Union[str, Any] = config.hidden_act
else:
snake_case_ : Tuple = None
def UpperCAmelCase__ ( self , _lowercase ) -> torch.Tensor:
'''simple docstring'''
if self.config.tf_padding:
snake_case_ : Dict = apply_tf_padding(_lowercase , self.convolution )
snake_case_ : str = self.convolution(_lowercase )
if self.normalization is not None:
snake_case_ : List[str] = self.normalization(_lowercase )
if self.activation is not None:
snake_case_ : List[Any] = self.activation(_lowercase )
return features
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = MobileNetVaConfig
_lowerCamelCase = load_tf_weights_in_mobilenet_va
_lowerCamelCase = '''mobilenet_v1'''
_lowerCamelCase = '''pixel_values'''
_lowerCamelCase = False
def UpperCAmelCase__ ( self , _lowercase ) -> None:
'''simple docstring'''
if isinstance(_lowercase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_lowercase , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
__lowerCAmelCase : List[str] = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
__lowerCAmelCase : Optional[Any] = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
'''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase = True ) -> Tuple:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : str = config
snake_case_ : int = 3_2
snake_case_ : str = max(int(depth * config.depth_multiplier ) , config.min_depth )
snake_case_ : Union[str, Any] = MobileNetVaConvLayer(
_lowercase , in_channels=config.num_channels , out_channels=_lowercase , kernel_size=3 , stride=2 , )
snake_case_ : Tuple = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
snake_case_ : Union[str, Any] = nn.ModuleList()
for i in range(1_3 ):
snake_case_ : Optional[Any] = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
snake_case_ : Optional[int] = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
_lowercase , in_channels=_lowercase , out_channels=_lowercase , kernel_size=3 , stride=strides[i] , groups=_lowercase , ) )
self.layer.append(
MobileNetVaConvLayer(
_lowercase , in_channels=_lowercase , out_channels=_lowercase , kernel_size=1 , ) )
snake_case_ : Optional[int] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
raise NotImplementedError
@add_start_docstrings_to_model_forward(_lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_lowercase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCAmelCase__ ( self , _lowercase = None , _lowercase = None , _lowercase = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
'''simple docstring'''
snake_case_ : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case_ : int = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
snake_case_ : List[Any] = self.conv_stem(_lowercase )
snake_case_ : Union[str, Any] = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
snake_case_ : int = layer_module(_lowercase )
if output_hidden_states:
snake_case_ : Optional[Any] = all_hidden_states + (hidden_states,)
snake_case_ : Tuple = hidden_states
if self.pooler is not None:
snake_case_ : Optional[Any] = torch.flatten(self.pooler(_lowercase ) , start_dim=1 )
else:
snake_case_ : Dict = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_lowercase , pooler_output=_lowercase , hidden_states=_lowercase , )
@add_start_docstrings(
'''
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase ) -> None:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Tuple = config.num_labels
snake_case_ : Optional[Any] = MobileNetVaModel(_lowercase )
snake_case_ : List[str] = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
snake_case_ : str = nn.Dropout(config.classifier_dropout_prob , inplace=_lowercase )
snake_case_ : List[str] = nn.Linear(_lowercase , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCAmelCase__ ( self , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
'''simple docstring'''
snake_case_ : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
snake_case_ : Dict = self.mobilenet_va(_lowercase , output_hidden_states=_lowercase , return_dict=_lowercase )
snake_case_ : str = outputs.pooler_output if return_dict else outputs[1]
snake_case_ : Optional[int] = self.classifier(self.dropout(_lowercase ) )
snake_case_ : Dict = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
snake_case_ : Optional[Any] = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
snake_case_ : List[Any] = """single_label_classification"""
else:
snake_case_ : Union[str, Any] = """multi_label_classification"""
if self.config.problem_type == "regression":
snake_case_ : Any = MSELoss()
if self.num_labels == 1:
snake_case_ : int = loss_fct(logits.squeeze() , labels.squeeze() )
else:
snake_case_ : Optional[int] = loss_fct(_lowercase , _lowercase )
elif self.config.problem_type == "single_label_classification":
snake_case_ : Dict = CrossEntropyLoss()
snake_case_ : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
snake_case_ : Tuple = BCEWithLogitsLoss()
snake_case_ : Optional[int] = loss_fct(_lowercase , _lowercase )
if not return_dict:
snake_case_ : Any = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=_lowercase , logits=_lowercase , hidden_states=outputs.hidden_states , )
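# Hypothetical usage sketch (added for clarity; the class is named MobileNetV1ForImageClassification
# in the original library, and the checkpoint comes from the docstring constants above):
#   model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#   logits = model(pixel_values).logits  # shape (batch_size, config.num_labels)
# The loss branch above routes labels to MSELoss, CrossEntropyLoss or BCEWithLogitsLoss depending
# on config.problem_type.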
| 21 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : str ):
    '''
    Reverse the order of the words in a sentence.
    >>> __lowerCAmelCase("I love Python")
    'Python love I'
    '''
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCAmelCase : Optional[Any] = {
'''configuration_roberta_prelayernorm''': [
'''ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''RobertaPreLayerNormConfig''',
'''RobertaPreLayerNormOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[int] = [
'''ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaPreLayerNormForCausalLM''',
'''RobertaPreLayerNormForMaskedLM''',
'''RobertaPreLayerNormForMultipleChoice''',
'''RobertaPreLayerNormForQuestionAnswering''',
'''RobertaPreLayerNormForSequenceClassification''',
'''RobertaPreLayerNormForTokenClassification''',
'''RobertaPreLayerNormModel''',
'''RobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[int] = [
'''TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaPreLayerNormForCausalLM''',
'''TFRobertaPreLayerNormForMaskedLM''',
'''TFRobertaPreLayerNormForMultipleChoice''',
'''TFRobertaPreLayerNormForQuestionAnswering''',
'''TFRobertaPreLayerNormForSequenceClassification''',
'''TFRobertaPreLayerNormForTokenClassification''',
'''TFRobertaPreLayerNormMainLayer''',
'''TFRobertaPreLayerNormModel''',
'''TFRobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = [
'''FlaxRobertaPreLayerNormForCausalLM''',
'''FlaxRobertaPreLayerNormForMaskedLM''',
'''FlaxRobertaPreLayerNormForMultipleChoice''',
'''FlaxRobertaPreLayerNormForQuestionAnswering''',
'''FlaxRobertaPreLayerNormForSequenceClassification''',
'''FlaxRobertaPreLayerNormForTokenClassification''',
'''FlaxRobertaPreLayerNormModel''',
'''FlaxRobertaPreLayerNormPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 21 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''nat'''
_lowerCamelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _lowercase=4 , _lowercase=3 , _lowercase=6_4 , _lowercase=[3, 4, 6, 5] , _lowercase=[2, 4, 8, 1_6] , _lowercase=7 , _lowercase=3.0 , _lowercase=True , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.1 , _lowercase="gelu" , _lowercase=0.02 , _lowercase=1E-5 , _lowercase=0.0 , _lowercase=None , _lowercase=None , **_lowercase , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : Any = patch_size
snake_case_ : Optional[Any] = num_channels
snake_case_ : Optional[Any] = embed_dim
snake_case_ : Tuple = depths
snake_case_ : int = len(_lowercase )
snake_case_ : Optional[int] = num_heads
snake_case_ : List[str] = kernel_size
snake_case_ : str = mlp_ratio
snake_case_ : str = qkv_bias
snake_case_ : str = hidden_dropout_prob
snake_case_ : Tuple = attention_probs_dropout_prob
snake_case_ : Tuple = drop_path_rate
snake_case_ : Dict = hidden_act
snake_case_ : Union[str, Any] = layer_norm_eps
snake_case_ : Tuple = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case_ : Union[str, Any] = int(embed_dim * 2 ** (len(_lowercase ) - 1) )
snake_case_ : Union[str, Any] = layer_scale_init_value
snake_case_ : Optional[Any] = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(_lowercase ) + 1 )]
snake_case_ , snake_case_ : Any = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names )
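# Worked example for the hidden_size set above (added for clarity): with the defaults embed_dim=64
# and depths=[3, 4, 6, 5] (four stages), hidden_size = int(64 * 2 ** (4 - 1)) = 512, i.e. the
# channel dimension after the last stage of the model.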
| 21 | 1 |
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. This is in contrast to taking a full-size model and reducing its layers and
# emb dimensions to the minimum while keeping the full vocab + merges files, which leads to ~3MB
# in total for all files. The latter approach is implemented by `fsmt-make-tiny-model.py`.
#
# It will then be used as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
__lowerCAmelCase : Any = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
__lowerCAmelCase : str = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
__lowerCAmelCase : Union[str, Any] = dict(zip(vocab, range(len(vocab))))
__lowerCAmelCase : Union[str, Any] = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase : str = Path(tmpdirname)
__lowerCAmelCase : Optional[Any] = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
__lowerCAmelCase : int = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
__lowerCAmelCase : int = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
__lowerCAmelCase : Optional[int] = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
__lowerCAmelCase : Optional[int] = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
__lowerCAmelCase : Union[str, Any] = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
__lowerCAmelCase : Tuple = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
__lowerCAmelCase : str = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 21 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
__lowerCAmelCase : Optional[Any] = False
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
return 1_2
@property
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return 1_2
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return 3_2
@property
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : List[Any] = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(_lowercase )
@property
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = 1_2
snake_case_ : Tuple = 1_2
snake_case_ : Tuple = {
"""attention_bias""": True,
"""cross_attention_dim""": 3_2,
"""attention_head_dim""": height * width,
"""num_attention_heads""": 1,
"""num_vector_embeds""": self.num_embed,
"""num_embeds_ada_norm""": self.num_embeds_ada_norm,
"""norm_num_groups""": 3_2,
"""sample_size""": width,
"""activation_fn""": """geglu-approximate""",
}
snake_case_ : Optional[Any] = TransformeraDModel(**_lowercase )
return model
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = """cpu"""
snake_case_ : List[str] = self.dummy_vqvae
snake_case_ : Any = self.dummy_text_encoder
snake_case_ : Tuple = self.dummy_tokenizer
snake_case_ : int = self.dummy_transformer
snake_case_ : int = VQDiffusionScheduler(self.num_embed )
snake_case_ : Dict = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowercase )
snake_case_ : Optional[Any] = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
snake_case_ : int = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : List[Any] = """teddy bear playing in the pool"""
snake_case_ : Dict = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : List[Any] = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="""np""" )
snake_case_ : Optional[int] = output.images
snake_case_ : List[Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Dict = pipe(
[prompt] , generator=_lowercase , output_type="""np""" , return_dict=_lowercase , num_inference_steps=2 )[0]
snake_case_ : List[Any] = image[0, -3:, -3:, -1]
snake_case_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
snake_case_ : Dict = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : int = """cpu"""
snake_case_ : List[Any] = self.dummy_vqvae
snake_case_ : Optional[int] = self.dummy_text_encoder
snake_case_ : List[Any] = self.dummy_tokenizer
snake_case_ : Union[str, Any] = self.dummy_transformer
snake_case_ : str = VQDiffusionScheduler(self.num_embed )
snake_case_ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(
learnable=_lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
snake_case_ : Union[str, Any] = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
snake_case_ : Any = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : Tuple = """teddy bear playing in the pool"""
snake_case_ : str = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Tuple = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="""np""" )
snake_case_ : Dict = output.images
snake_case_ : Union[str, Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Any = pipe(
[prompt] , generator=_lowercase , output_type="""np""" , return_dict=_lowercase , num_inference_steps=2 )[0]
snake_case_ : Optional[Any] = image[0, -3:, -3:, -1]
snake_case_ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
snake_case_ : int = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" )
snake_case_ : str = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" )
snake_case_ : Optional[Any] = pipeline.to(_lowercase )
pipeline.set_progress_bar_config(disable=_lowercase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case_ : Any = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Optional[int] = pipeline(
"""teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=_lowercase , output_type="""np""" , )
snake_case_ : Union[str, Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 21 | 1 |
"""simple docstring"""
import cmath
import math
def __lowerCAmelCase ( __UpperCamelCase : float , __UpperCamelCase : float , __UpperCamelCase : float , __UpperCamelCase : float ):
'''simple docstring'''
snake_case_ : List[Any] = math.radians(__UpperCamelCase )
snake_case_ : Union[str, Any] = math.radians(__UpperCamelCase )
# Convert voltage and current to rectangular form
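    # cmath.rect(r, phi) returns the complex phasor r * (cos(phi) + 1j * sin(phi)),
    # so a magnitude and a phase angle map directly to a rectangular complex number.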
snake_case_ : Union[str, Any] = cmath.rect(__UpperCamelCase , __UpperCamelCase )
snake_case_ : str = cmath.rect(__UpperCamelCase , __UpperCamelCase )
# Calculate apparent power
return voltage_rect * current_rect
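# Example: with voltage=100, current=5 and both angles 0, both phasors are
# purely real, so the apparent power comes out as (500+0j).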
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: if, when running this conversion script, you get the exception:
#   ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell Python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.
# https://github.com/microsoft/Megatron-DeepSpeed/, you likely need that clone
# on your path instead, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Any=0 ):
'''simple docstring'''
if name is None:
snake_case_ : Dict = None
else:
snake_case_ : Dict = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(5_0 - spaces ) + """s}"""
snake_case_ : Any = fmt.format(__UpperCamelCase )
# Print and recurse (if needed).
if isinstance(__UpperCamelCase , __UpperCamelCase ):
if msg is not None:
print(__UpperCamelCase )
for k in val.keys():
recursive_print(__UpperCamelCase , val[k] , spaces + 2 )
elif isinstance(__UpperCamelCase , torch.Tensor ):
print(__UpperCamelCase , """:""" , val.size() )
else:
print(__UpperCamelCase , """:""" , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : Any = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
snake_case_ : List[str] = (num_heads, hidden_size, num_splits) + input_shape[1:]
snake_case_ : Tuple = param.view(*__UpperCamelCase )
snake_case_ : Tuple = param.transpose(0 , 2 )
snake_case_ : Any = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
snake_case_ : Optional[Any] = (num_heads, num_splits, hidden_size) + input_shape[1:]
snake_case_ : str = param.view(*__UpperCamelCase )
snake_case_ : Dict = param.transpose(0 , 1 ).contiguous()
snake_case_ : int = param.view(*__UpperCamelCase )
return param
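# Minimal sketch of the checkpoint_version >= 2.0 branch above, using tiny
# illustrative sizes only: Megatron stores the fused QKV weight as
# [num_heads * num_splits * hidden_size, :], and the reordering permutes it to
# [num_splits * num_heads * hidden_size, :].
def _demo_qkv_reordering():
    num_heads, num_splits, hidden_size, cols = 2, 3, 4, 5
    param = torch.arange(num_heads * num_splits * hidden_size * cols).view(-1, cols)
    input_shape = param.size()
    reordered = (
        param.view(num_heads, num_splits, hidden_size, cols)
        .transpose(0, 1)  # -> [num_splits, num_heads, hidden_size, cols]
        .contiguous()
        .view(*input_shape)  # back to 2D with the new row ordering
    )
    assert reordered.shape == param.shape
    return reordered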
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Dict = {}
# old versions did not store training args
snake_case_ : List[str] = input_state_dict.get("""args""" , __UpperCamelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
snake_case_ : Tuple = ds_args.padded_vocab_size
snake_case_ : Optional[int] = ds_args.max_position_embeddings
snake_case_ : Union[str, Any] = ds_args.hidden_size
snake_case_ : Union[str, Any] = ds_args.num_layers
snake_case_ : str = ds_args.num_attention_heads
snake_case_ : str = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
snake_case_ : Union[str, Any] = config.n_head
# The hidden_size per head.
snake_case_ : Optional[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
snake_case_ : Optional[Any] = input_state_dict["""checkpoint_version"""]
else:
snake_case_ : int = 0.0
# The model.
snake_case_ : List[str] = input_state_dict["""model"""]
# The language model.
snake_case_ : str = model["""language_model"""]
# The embeddings.
snake_case_ : Tuple = lm["""embedding"""]
# The word embeddings.
snake_case_ : List[str] = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
snake_case_ : Optional[int] = word_embeddings[: config.vocab_size, :]
snake_case_ : Optional[int] = word_embeddings
# The position embeddings.
snake_case_ : List[Any] = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
snake_case_ : Tuple = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match' )
# Store the position embeddings.
snake_case_ : Union[str, Any] = pos_embeddings
# The transformer.
snake_case_ : Optional[Any] = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
snake_case_ : Optional[Any] = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
snake_case_ : List[str] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
snake_case_ : int = layer_re.match(__UpperCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
snake_case_ : Tuple = int(m.group(1 ) )
# The name of the operation.
snake_case_ : Any = m.group(2 )
# Is it a weight or a bias?
snake_case_ : Union[str, Any] = m.group(3 )
# The name of the layer.
snake_case_ : str = F'transformer.h.{layer_idx}'
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
snake_case_ : Dict = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
snake_case_ : Optional[int] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
snake_case_ : Optional[Any] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , __UpperCamelCase , __UpperCamelCase )
snake_case_ : List[Any] = causal_mask
# Insert a "dummy" tensor for masked_bias.
snake_case_ : str = torch.tensor(-1E4 , dtype=torch.floataa )
snake_case_ : List[Any] = masked_bias
snake_case_ : Optional[int] = fix_query_key_value_ordering(__UpperCamelCase , __UpperCamelCase , 3 , __UpperCamelCase , __UpperCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
snake_case_ : str = out_val.transpose(0 , 1 ).contiguous()
# Store.
snake_case_ : Tuple = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
snake_case_ : Optional[Any] = fix_query_key_value_ordering(__UpperCamelCase , __UpperCamelCase , 3 , __UpperCamelCase , __UpperCamelCase )
# Store. No change of shape.
snake_case_ : List[Any] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
snake_case_ : Any = megatron_to_transformers[op_name]
snake_case_ : str = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
snake_case_ : List[str] = megatron_to_transformers[op_name]
snake_case_ : Tuple = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
snake_case_ : Dict = transformer["""final_layernorm.weight"""]
snake_case_ : Dict = transformer["""final_layernorm.bias"""]
    # For the LM head, transformers expects the weight matrix to be tied to the word embeddings.
snake_case_ : Optional[int] = word_embeddings
# It should be done!
return output_state_dict
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[str] = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" , type=__UpperCamelCase , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
parser.add_argument(
"""--config_file""" , default="""""" , type=__UpperCamelCase , help="""An optional config json file describing the pre-trained model.""" , )
snake_case_ : str = parser.parse_args()
# Extract the basename.
snake_case_ : Optional[Any] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F'Extracting PyTorch state dictionary from {args.path_to_checkpoint}' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
snake_case_ : Optional[int] = torch.load(__UpperCamelCase , map_location="""cpu""" )
else:
snake_case_ : List[Any] = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
snake_case_ : Any = input_state_dict.get("""args""" , __UpperCamelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
snake_case_ : Any = """gelu_fast"""
elif ds_args.openai_gelu:
snake_case_ : Tuple = """gelu_new"""
else:
snake_case_ : List[str] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
snake_case_ : Dict = """gelu_new"""
# Spell out all parameters in case the defaults change.
snake_case_ : List[str] = GPTaConfig(
vocab_size=5_0_2_5_7 , n_positions=1_0_2_4 , n_embd=1_0_2_4 , n_layer=2_4 , n_head=1_6 , n_inner=4_0_9_6 , activation_function=__UpperCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=__UpperCamelCase , summary_activation=__UpperCamelCase , summary_proj_to_labels=__UpperCamelCase , summary_first_dropout=0.1 , scale_attn_weights=__UpperCamelCase , use_cache=__UpperCamelCase , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , )
else:
snake_case_ : List[Any] = GPTaConfig.from_json_file(args.config_file )
snake_case_ : int = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
snake_case_ : Tuple = convert_megatron_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(__UpperCamelCase , __UpperCamelCase )
# Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906
if ds_args is not None:
snake_case_ : str = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
snake_case_ : Optional[Any] = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
snake_case_ : str = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'Unrecognized tokenizer_type {tokenizer_type}' )
else:
snake_case_ : List[str] = """gpt2"""
snake_case_ : List[Any] = AutoTokenizer.from_pretrained(__UpperCamelCase )
snake_case_ : List[str] = type(__UpperCamelCase ).__name__
snake_case_ : Optional[int] = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(__UpperCamelCase )
# Save tokenizer based on args
print(F'Adding {tokenizer_class} tokenizer files' )
tokenizer.save_pretrained(__UpperCamelCase )
# Store the state_dict to file.
snake_case_ : List[Any] = os.path.join(__UpperCamelCase , """pytorch_model.bin""" )
print(F'Saving checkpoint to "{output_checkpoint_file}"' )
torch.save(__UpperCamelCase , __UpperCamelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 21 | 1 |
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@slow
@require_torch
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
snake_case_ : List[str] = BertTokenizer.from_pretrained("""bert-base-uncased""" )
snake_case_ : int = bertabert.config.encoder.vocab_size
snake_case_ : Optional[Any] = tokenizer.sep_token_id
snake_case_ : List[str] = tokenizer.cls_token_id
snake_case_ : Union[str, Any] = 1_2_8
snake_case_ : Tuple = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
snake_case_ : Any = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
snake_case_ : Optional[int] = train_dataset.select(range(3_2 ) )
snake_case_ : List[Any] = val_dataset.select(range(1_6 ) )
snake_case_ : Any = 4
def _map_to_encoder_decoder_inputs(_lowercase ):
# Tokenizer will automatically set [BOS] <text> [EOS]
snake_case_ : List[str] = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=_lowercase , max_length=5_1_2 )
snake_case_ : Any = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=_lowercase , max_length=1_2_8 )
snake_case_ : Optional[Any] = inputs.input_ids
snake_case_ : Any = inputs.attention_mask
snake_case_ : Dict = outputs.input_ids
snake_case_ : Tuple = outputs.input_ids.copy()
snake_case_ : Dict = [
[-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
snake_case_ : List[str] = outputs.attention_mask
assert all(len(_lowercase ) == 5_1_2 for x in inputs.input_ids )
assert all(len(_lowercase ) == 1_2_8 for x in outputs.input_ids )
return batch
def _compute_metrics(_lowercase ):
snake_case_ : Optional[int] = pred.label_ids
snake_case_ : Union[str, Any] = pred.predictions
# all unnecessary tokens are removed
snake_case_ : Optional[Any] = tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase )
snake_case_ : Union[str, Any] = tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase )
snake_case_ : Tuple = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_lowercase ) )] ) / len(_lowercase )
return {"accuracy": accuracy}
# map train dataset
snake_case_ : Union[str, Any] = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_lowercase , batch_size=_lowercase , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
snake_case_ : int = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_lowercase , batch_size=_lowercase , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
snake_case_ : str = self.get_auto_remove_tmp_dir()
snake_case_ : Any = SeqaSeqTrainingArguments(
output_dir=_lowercase , per_device_train_batch_size=_lowercase , per_device_eval_batch_size=_lowercase , predict_with_generate=_lowercase , evaluation_strategy="""steps""" , do_train=_lowercase , do_eval=_lowercase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
snake_case_ : Dict = SeqaSeqTrainer(
model=_lowercase , args=_lowercase , compute_metrics=_compute_metrics , train_dataset=_lowercase , eval_dataset=_lowercase , tokenizer=_lowercase , )
# start training
trainer.train()
| 21 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
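    # Exact GELU: x * Phi(x), where Phi is the standard normal CDF evaluated via erf.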
snake_case_ : int = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : int = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
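    # Tanh approximation of GELU: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))).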
snake_case_ : int = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : List[Any] = tf.cast(math.pi , x.dtype )
snake_case_ : int = tf.cast(0.044_715 , x.dtype )
snake_case_ : Optional[int] = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(__UpperCamelCase , 3 )) ))
return x * cdf
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
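    # Mish activation: x * tanh(softplus(x)).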
snake_case_ : Optional[Any] = tf.convert_to_tensor(__UpperCamelCase )
return x * tf.tanh(tf.math.softplus(__UpperCamelCase ) )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
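    # Fast GELU: 0.5 * x * (1 + tanh(0.7978845608 * x * (1 + 0.044715 * x * x)));
    # the constant 0.7978845608 is approximately sqrt(2/pi).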
snake_case_ : str = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : int = tf.cast(0.044_715 , x.dtype )
snake_case_ : Optional[int] = tf.cast(0.7_978_845_608 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
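    # Quick GELU: x * sigmoid(1.702 * x), a cheap sigmoid-based approximation of GELU.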
snake_case_ : Tuple = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : str = tf.cast(1.702 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
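    # GELU with outputs clipped to the range [-10, 10].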
return tf.clip_by_value(_gelu(__UpperCamelCase ) , -1_0 , 1_0 )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str]=-1 ):
'''simple docstring'''
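    # Gated Linear Unit (GLU): the input is split into two halves along the
    # given axis, and one half gates the other through a sigmoid.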
snake_case_ , snake_case_ : List[Any] = tf.split(__UpperCamelCase , 2 , axis=__UpperCamelCase )
return a * tf.math.sigmoid(__UpperCamelCase )
if version.parse(tf.version.VERSION) >= version.parse('''2.4'''):
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
return tf.keras.activations.gelu(__UpperCamelCase , approximate=__UpperCamelCase )
__lowerCAmelCase : int = tf.keras.activations.gelu
__lowerCAmelCase : Optional[Any] = approximate_gelu_wrap
else:
__lowerCAmelCase : List[Any] = _gelu
__lowerCAmelCase : Any = _gelu_new
__lowerCAmelCase : Dict = {
'''gelu''': gelu,
'''gelu_10''': gelu_aa,
'''gelu_fast''': gelu_fast,
'''gelu_new''': gelu_new,
'''glu''': glu,
'''mish''': mish,
'''quick_gelu''': quick_gelu,
'''relu''': tf.keras.activations.relu,
'''sigmoid''': tf.keras.activations.sigmoid,
'''silu''': tf.keras.activations.swish,
'''swish''': tf.keras.activations.swish,
'''tanh''': tf.keras.activations.tanh,
}
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F'function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}' )
| 21 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''upernet'''
def __init__( self , _lowercase=None , _lowercase=5_1_2 , _lowercase=0.02 , _lowercase=[1, 2, 3, 6] , _lowercase=True , _lowercase=0.4 , _lowercase=3_8_4 , _lowercase=2_5_6 , _lowercase=1 , _lowercase=False , _lowercase=2_5_5 , **_lowercase , ) -> List[Any]:
'''simple docstring'''
super().__init__(**_lowercase )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
snake_case_ : Optional[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(_lowercase , _lowercase ):
snake_case_ : List[Any] = backbone_config.get("""model_type""" )
snake_case_ : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
snake_case_ : Any = config_class.from_dict(_lowercase )
snake_case_ : str = backbone_config
snake_case_ : Union[str, Any] = hidden_size
snake_case_ : Dict = initializer_range
snake_case_ : Union[str, Any] = pool_scales
snake_case_ : Optional[Any] = use_auxiliary_head
snake_case_ : Any = auxiliary_loss_weight
snake_case_ : List[Any] = auxiliary_in_channels
snake_case_ : str = auxiliary_channels
snake_case_ : Dict = auxiliary_num_convs
snake_case_ : List[Any] = auxiliary_concat_input
snake_case_ : int = loss_ignore_index
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = copy.deepcopy(self.__dict__ )
snake_case_ : Optional[int] = self.backbone_config.to_dict()
snake_case_ : Dict = self.__class__.model_type
return output
| 21 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
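    # Longest path in a DAG via Kahn's algorithm: repeatedly pop zero-indegree
    # vertices from the queue and relax the distances of their successors.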
snake_case_ : int = [0] * len(__UpperCamelCase )
snake_case_ : List[str] = []
snake_case_ : Any = [1] * len(__UpperCamelCase )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__UpperCamelCase ) ):
if indegree[i] == 0:
queue.append(__UpperCamelCase )
while queue:
snake_case_ : Optional[int] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
snake_case_ : Union[str, Any] = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(__UpperCamelCase )
print(max(__UpperCamelCase ) )
# Adjacency list of Graph
__lowerCAmelCase : str = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 21 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[str] = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
snake_case_ : int = tf.convert_to_tensor(
[[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
snake_case_ : str = model(_lowercase )["""last_hidden_state"""]
snake_case_ : int = tf.TensorShape((1, 1_0, 7_6_8) )
self.assertEqual(output.shape , _lowercase )
# compare the actual values for a slice.
snake_case_ : str = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 21 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
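    # Multiplicative formula: C(n, k) = prod_{i=0..k-1} (n - i) / (i + 1),
    # evaluated with integer division so intermediate results stay exact.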
    snake_case_ : Any = 1 # To keep the calculated value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
snake_case_ : Optional[int] = n - k
# Calculate C(n,k)
for i in range(__UpperCamelCase ):
result *= n - i
result //= i + 1
return result
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return binomial_coefficient(2 * node_count , __UpperCamelCase ) // (node_count + 1)
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
if n < 0:
raise ValueError("""factorial() not defined for negative values""" )
snake_case_ : Optional[int] = 1
for i in range(1 , n + 1 ):
result *= i
return result
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return catalan_number(__UpperCamelCase ) * factorial(__UpperCamelCase )
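# Example: for node_count = 3, catalan_number(3) = binomial_coefficient(6, 3) // 4
# = 20 // 4 = 5 binary search trees, and binary_tree_count(3) = 5 * 3! = 30
# distinct binary trees.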
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] = int(input('''Enter the number of nodes: ''').strip() or 0)
if node_count <= 0:
raise ValueError('''We need some nodes to work with.''')
print(
F'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
F'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
| 21 | 1 |
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
__lowerCAmelCase : Optional[int] = 4
__lowerCAmelCase : Union[str, Any] = 3
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
pass
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
for shard in shards:
for i in range(__UpperCamelCase ):
yield {"i": i, "shard": shard}
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Any = int(os.environ["""RANK"""] )
snake_case_ : int = int(os.environ["""WORLD_SIZE"""] )
snake_case_ : Union[str, Any] = ArgumentParser()
parser.add_argument("""--streaming""" , type=__UpperCamelCase )
parser.add_argument("""--local_rank""" , type=__UpperCamelCase )
parser.add_argument("""--num_workers""" , type=__UpperCamelCase , default=0 )
snake_case_ : Tuple = parser.parse_args()
snake_case_ : int = args.streaming
snake_case_ : Any = args.num_workers
snake_case_ : List[Any] = {"""shards""": [F'shard_{shard_idx}' for shard_idx in range(__UpperCamelCase )]}
snake_case_ : Any = IterableDataset.from_generator(__UpperCamelCase , gen_kwargs=__UpperCamelCase )
if not streaming:
snake_case_ : Any = Dataset.from_list(list(__UpperCamelCase ) )
snake_case_ : List[Any] = split_dataset_by_node(__UpperCamelCase , rank=__UpperCamelCase , world_size=__UpperCamelCase )
snake_case_ : List[str] = torch.utils.data.DataLoader(__UpperCamelCase , num_workers=__UpperCamelCase )
snake_case_ : Dict = NUM_SHARDS * NUM_ITEMS_PER_SHARD
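    # Expected per-rank count: floor(full_size / world_size), plus one extra
    # item when rank < full_size % world_size (the remainder is spread over
    # the first ranks).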
snake_case_ : Optional[int] = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
snake_case_ : Tuple = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(F'local_size {local_size} != expected_local_size {expected_local_size}' )
if __name__ == "__main__":
main()
| 21 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : Dict = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''swin'''
_lowerCamelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _lowercase=2_2_4 , _lowercase=4 , _lowercase=3 , _lowercase=9_6 , _lowercase=[2, 2, 6, 2] , _lowercase=[3, 6, 1_2, 2_4] , _lowercase=7 , _lowercase=4.0 , _lowercase=True , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.1 , _lowercase="gelu" , _lowercase=False , _lowercase=0.02 , _lowercase=1E-5 , _lowercase=3_2 , _lowercase=None , _lowercase=None , **_lowercase , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : str = image_size
snake_case_ : int = patch_size
snake_case_ : Optional[int] = num_channels
snake_case_ : Union[str, Any] = embed_dim
snake_case_ : Optional[int] = depths
snake_case_ : Optional[int] = len(_lowercase )
snake_case_ : Optional[Any] = num_heads
snake_case_ : Optional[Any] = window_size
snake_case_ : Optional[Any] = mlp_ratio
snake_case_ : Optional[Any] = qkv_bias
snake_case_ : Optional[Any] = hidden_dropout_prob
snake_case_ : Tuple = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = drop_path_rate
snake_case_ : List[Any] = hidden_act
snake_case_ : str = use_absolute_embeddings
snake_case_ : str = layer_norm_eps
snake_case_ : Optional[Any] = initializer_range
snake_case_ : Any = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case_ : Tuple = int(embed_dim * 2 ** (len(_lowercase ) - 1) )
snake_case_ : Tuple = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(_lowercase ) + 1 )]
snake_case_ , snake_case_ : Any = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = version.parse('''1.11''' )
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCAmelCase__ ( self ) -> float:
'''simple docstring'''
return 1E-4
| 21 | 1 |
"""simple docstring"""
from __future__ import annotations
def __lowerCAmelCase ( __UpperCamelCase : list[float] , __UpperCamelCase : Any ):
'''simple docstring'''
print(F'Vertex\tShortest Distance from vertex {src}' )
for i, d in enumerate(__UpperCamelCase ):
print(F'{i}\t\t{d}' )
def __lowerCAmelCase ( __UpperCamelCase : list[dict[str, int]] , __UpperCamelCase : list[float] , __UpperCamelCase : int ):
'''simple docstring'''
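    # One extra relaxation pass over all edges: if any edge can still be
    # relaxed after the main V-1 passes, a negative-weight cycle is reachable.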
for j in range(__UpperCamelCase ):
snake_case_ , snake_case_ , snake_case_ : Any = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
return True
return False
def __lowerCAmelCase ( __UpperCamelCase : list[dict[str, int]] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
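    # Relax every edge V-1 times: a shortest path in a graph without negative
    # cycles uses at most V-1 edges.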
snake_case_ : str = [float("""inf""" )] * vertex_count
snake_case_ : List[str] = 0.0
for _ in range(vertex_count - 1 ):
for j in range(__UpperCamelCase ):
snake_case_ , snake_case_ , snake_case_ : Tuple = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
snake_case_ : Optional[Any] = distance[u] + w
snake_case_ : Tuple = check_negative_cycle(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if negative_cycle_exists:
raise Exception("""Negative cycle found""" )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : List[str] = int(input('''Enter number of vertices: ''').strip())
__lowerCAmelCase : List[Any] = int(input('''Enter number of edges: ''').strip())
__lowerCAmelCase : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Dict = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
__lowerCAmelCase : List[str] = {'''src''': src, '''dst''': dest, '''weight''': weight}
__lowerCAmelCase : List[Any] = int(input('''\nEnter shortest path source:''').strip())
__lowerCAmelCase : Union[str, Any] = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 21 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : int = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" )
snake_case_ : List[Any] = AutoTokenizer.from_pretrained("""google/mt5-small""" )
snake_case_ : Dict = tokenizer("""Hello there""" , return_tensors="""np""" ).input_ids
snake_case_ : List[str] = tokenizer("""Hi I am""" , return_tensors="""np""" ).input_ids
snake_case_ : Optional[Any] = shift_tokens_right(_lowercase , model.config.pad_token_id , model.config.decoder_start_token_id )
snake_case_ : Tuple = model(_lowercase , decoder_input_ids=_lowercase ).logits
snake_case_ : Tuple = optax.softmax_cross_entropy(_lowercase , onehot(_lowercase , logits.shape[-1] ) ).mean()
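        # Scaling the mean per-token cross-entropy by the sequence length
        # recovers a sequence-level log-likelihood for comparison with the
        # reference score below.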
snake_case_ : List[str] = -(labels.shape[-1] * loss.item())
snake_case_ : Optional[int] = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 21 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : Any = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__lowerCAmelCase : int = 25_0004
__lowerCAmelCase : Union[str, Any] = 25_0020
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = MBartTokenizer
_lowerCamelCase = MBartTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = True
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ : List[str] = MBartTokenizer(_lowercase , keep_accents=_lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Tuple = MBartTokenizer(_lowercase , keep_accents=_lowercase )
snake_case_ : Any = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_lowercase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
snake_case_ : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
snake_case_ : Optional[Any] = tokenizer.convert_tokens_to_ids(_lowercase )
self.assertListEqual(
_lowercase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
snake_case_ : Tuple = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case_ : Any = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
snake_case_ : Tuple = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
snake_case_ : Optional[int] = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
snake_case_ : Union[str, Any] = tempfile.mkdtemp()
snake_case_ : str = tokenizer_r.save_pretrained(_lowercase )
snake_case_ : Optional[Any] = tokenizer_p.save_pretrained(_lowercase )
            # Checks it saves the same files plus the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
snake_case_ : int = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(_lowercase , _lowercase )
# Checks everything loads correctly in the same way
snake_case_ : List[str] = tokenizer_r.from_pretrained(_lowercase )
snake_case_ : List[str] = tokenizer_p.from_pretrained(_lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowercase , _lowercase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_lowercase )
# Save tokenizer rust, legacy_format=True
snake_case_ : List[Any] = tempfile.mkdtemp()
snake_case_ : str = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase )
snake_case_ : List[Any] = tokenizer_p.save_pretrained(_lowercase )
            # Checks it saves the same files
self.assertSequenceEqual(_lowercase , _lowercase )
# Checks everything loads correctly in the same way
snake_case_ : str = tokenizer_r.from_pretrained(_lowercase )
snake_case_ : List[Any] = tokenizer_p.from_pretrained(_lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowercase , _lowercase ) )
shutil.rmtree(_lowercase )
# Save tokenizer rust, legacy_format=False
snake_case_ : List[Any] = tempfile.mkdtemp()
snake_case_ : Optional[Any] = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase )
snake_case_ : str = tokenizer_p.save_pretrained(_lowercase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case_ : Dict = tokenizer_r.from_pretrained(_lowercase )
snake_case_ : Optional[Any] = tokenizer_p.from_pretrained(_lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowercase , _lowercase ) )
shutil.rmtree(_lowercase )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = '''facebook/mbart-large-en-ro'''
_lowerCamelCase = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
_lowerCamelCase = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
_lowerCamelCase = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
def UpperCAmelCase__ ( cls ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
snake_case_ : Union[str, Any] = 1
return cls
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 2_5_0_0_2_0 )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _lowercase )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
self.assertIn(_lowercase , self.tokenizer.all_special_ids )
snake_case_ : Tuple = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
snake_case_ : Optional[int] = self.tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
snake_case_ : Union[str, Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertNotIn(self.tokenizer.eos_token , _lowercase )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = ["""this is gunna be a long sentence """ * 2_0]
assert isinstance(src_text[0] , _lowercase )
snake_case_ : int = 1_0
snake_case_ : str = self.tokenizer(_lowercase , max_length=_lowercase , truncation=_lowercase ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , _lowercase )
self.assertEqual(len(_lowercase ) , _lowercase )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [2_5_0_0_2_6, 2_5_0_0_0_1] )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = tempfile.mkdtemp()
snake_case_ : Tuple = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_lowercase )
snake_case_ : str = MBartTokenizer.from_pretrained(_lowercase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _lowercase )
@require_torch
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_lowercase , return_tensors="""pt""" )
snake_case_ : Any = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Optional[Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_lowercase , truncation=_lowercase , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
snake_case_ : Dict = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
snake_case_ : Dict = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _lowercase )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = self.tokenizer(self.src_text , padding=_lowercase , truncation=_lowercase , max_length=3 , return_tensors="""pt""" )
snake_case_ : Dict = self.tokenizer(
text_target=self.tgt_text , padding=_lowercase , truncation=_lowercase , max_length=1_0 , return_tensors="""pt""" )
snake_case_ : Tuple = targets["""input_ids"""]
snake_case_ : Optional[Any] = shift_tokens_right(_lowercase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[int] = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(_lowercase ) , {
# A, test, EOS, en_XX
"""input_ids""": [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 2_5_0_0_0_1,
} , )
| 21 |
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 21 | 1 |
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 21 |
"""simple docstring"""
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__lowerCAmelCase : List[str] = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
__lowerCAmelCase : Optional[Any] = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
__lowerCAmelCase : str = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGLUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
        - for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
        - \'matthews_correlation\': Matthews correlation coefficient
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Any ):
'''simple docstring'''
return float((preds == labels).mean() )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : str="binary" ):
'''simple docstring'''
snake_case_ : Optional[Any] = simple_accuracy(__UpperCamelCase , __UpperCamelCase )
snake_case_ : Dict = float(fa_score(y_true=__UpperCamelCase , y_pred=__UpperCamelCase , average=__UpperCamelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : int ):
'''simple docstring'''
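    # Group answer-level (prediction, label) pairs by question id, then average
    # per-question exact match and macro-F1 across questions.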
snake_case_ : List[Any] = {}
for id_pred, label in zip(__UpperCamelCase , __UpperCamelCase ):
snake_case_ : Optional[int] = F'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
snake_case_ : Union[str, Any] = id_pred["""prediction"""]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
snake_case_ : str = [(pred, label)]
snake_case_ , snake_case_ : List[str] = [], []
for question, preds_labels in question_map.items():
snake_case_ , snake_case_ : Optional[Any] = zip(*__UpperCamelCase )
snake_case_ : int = fa_score(y_true=__UpperCamelCase , y_pred=__UpperCamelCase , average="""macro""" )
fas.append(__UpperCamelCase )
snake_case_ : Dict = int(sum(pred == label for pred, label in preds_labels ) == len(__UpperCamelCase ) )
ems.append(__UpperCamelCase )
snake_case_ : Optional[int] = float(sum(__UpperCamelCase ) / len(__UpperCamelCase ) )
snake_case_ : Any = sum(__UpperCamelCase ) / len(__UpperCamelCase )
snake_case_ : int = float(fa_score(y_true=__UpperCamelCase , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> List[str]:
'''simple docstring'''
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_lowercase , _lowercase )}
elif self.config_name == "cb":
return acc_and_fa(_lowercase , _lowercase , fa_avg="""macro""" )
elif self.config_name == "record":
snake_case_ : Optional[Any] = [
{
"""qas""": [
{"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
for ref in references
]
}
]
snake_case_ : Dict = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
return evaluate_record(_lowercase , _lowercase )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_lowercase , _lowercase )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_lowercase , _lowercase )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 21 | 1 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : int = {
'''nvidia/segformer-b0-finetuned-ade-512-512''': (
'''https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'''
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''segformer'''
def __init__( self , _lowercase=3 , _lowercase=4 , _lowercase=[2, 2, 2, 2] , _lowercase=[8, 4, 2, 1] , _lowercase=[3_2, 6_4, 1_6_0, 2_5_6] , _lowercase=[7, 3, 3, 3] , _lowercase=[4, 2, 2, 2] , _lowercase=[1, 2, 5, 8] , _lowercase=[4, 4, 4, 4] , _lowercase="gelu" , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.1 , _lowercase=0.02 , _lowercase=0.1 , _lowercase=1E-6 , _lowercase=2_5_6 , _lowercase=2_5_5 , **_lowercase , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**_lowercase )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , _lowercase , )
snake_case_ : Union[str, Any] = num_channels
snake_case_ : Optional[int] = num_encoder_blocks
snake_case_ : Union[str, Any] = depths
snake_case_ : Optional[int] = sr_ratios
snake_case_ : Dict = hidden_sizes
snake_case_ : str = patch_sizes
snake_case_ : Optional[int] = strides
snake_case_ : Union[str, Any] = mlp_ratios
snake_case_ : Optional[Any] = num_attention_heads
snake_case_ : Optional[Any] = hidden_act
snake_case_ : Dict = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = classifier_dropout_prob
snake_case_ : str = initializer_range
snake_case_ : Optional[int] = drop_path_rate
snake_case_ : Tuple = layer_norm_eps
snake_case_ : List[str] = decoder_hidden_size
snake_case_ : Optional[Any] = kwargs.get("""reshape_last_stage""" , _lowercase )
snake_case_ : List[str] = semantic_loss_ignore_index
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = version.parse('''1.11''' )
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCAmelCase__ ( self ) -> float:
'''simple docstring'''
return 1E-4
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return 1_2
| 21 |
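A short usage sketch of the configuration defined above, assuming the standard (de-obfuscated) transformers names `SegformerConfig` and `SegformerOnnxConfig`:

from transformers import SegformerConfig
from transformers.models.segformer.configuration_segformer import SegformerOnnxConfig

config = SegformerConfig(num_encoder_blocks=4, depths=[2, 2, 2, 2])
onnx_config = SegformerOnnxConfig(config)
print(onnx_config.inputs)               # OrderedDict mapping pixel_values to its axes
print(onnx_config.atol_for_validation)  # 1e-4
print(onnx_config.default_onnx_opset)   # 12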
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=9_9 , _lowercase=3_2 , _lowercase=5 , _lowercase=4 , _lowercase=3_7 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=1_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=4 , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = parent
snake_case_ : Dict = batch_size
snake_case_ : Any = seq_length
snake_case_ : Tuple = is_training
snake_case_ : Dict = use_attention_mask
snake_case_ : int = use_token_type_ids
snake_case_ : List[Any] = use_labels
snake_case_ : List[str] = vocab_size
snake_case_ : str = hidden_size
snake_case_ : Optional[Any] = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : Any = intermediate_size
snake_case_ : Optional[Any] = hidden_act
snake_case_ : Tuple = hidden_dropout_prob
snake_case_ : List[Any] = attention_probs_dropout_prob
snake_case_ : int = max_position_embeddings
snake_case_ : Dict = type_vocab_size
snake_case_ : Dict = type_sequence_label_size
snake_case_ : Optional[Any] = initializer_range
snake_case_ : Tuple = num_choices
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Tuple = None
if self.use_attention_mask:
snake_case_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : List[str] = None
if self.use_token_type_ids:
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Tuple = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Any = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = config_and_inputs
snake_case_ : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Tuple = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = config_and_inputs
snake_case_ : Union[str, Any] = True
snake_case_ : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
snake_case_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = True
_lowerCamelCase = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Any = FlaxRobertaPreLayerNormModelTester(self )
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case_ : Tuple = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
        snake_case_ : List[str] = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.int32 )
snake_case_ : str = model(_lowercase )[0]
snake_case_ : int = [1, 1_1, 5_0_2_6_5]
self.assertEqual(list(output.shape ) , _lowercase )
# compare the actual values for a slice.
snake_case_ : Tuple = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.float32 )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Any = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
        snake_case_ : Dict = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.int32 )
snake_case_ : Any = model(_lowercase )[0]
# compare the actual values for a slice.
snake_case_ : Optional[Any] = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.float32 )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
| 21 | 1 |
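The integration tests above boil down to this inference path; a minimal sketch (needs flax installed and network access to download the checkpoint; the short input ids are illustrative):

import numpy as np
from transformers import FlaxRobertaPreLayerNormForMaskedLM

model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained(
    "andreasmadsen/efficient_mlm_m0.40", from_pt=True
)
input_ids = np.array([[0, 31414, 232, 2]], dtype=np.int32)  # illustrative ids
logits = model(input_ids)[0]
print(logits.shape)  # (1, 4, 50265): vocab-sized logits per token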
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = 1.5
snake_case_ : Any = int(factor * num_class_images )
snake_case_ : Any = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=__UpperCamelCase , aesthetic_weight=0.1 )
os.makedirs(F'{class_data_dir}/images' , exist_ok=__UpperCamelCase )
if len(list(Path(F'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
return
while True:
snake_case_ : Dict = client.query(text=__UpperCamelCase )
if len(__UpperCamelCase ) >= factor * num_class_images or num_images > 1E4:
break
else:
snake_case_ : Union[str, Any] = int(factor * num_images )
snake_case_ : str = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=__UpperCamelCase , aesthetic_weight=0.1 , )
snake_case_ : Optional[Any] = 0
snake_case_ : int = 0
snake_case_ : str = tqdm(desc="""downloading real regularization images""" , total=__UpperCamelCase )
with open(F'{class_data_dir}/caption.txt' , """w""" ) as fa, open(F'{class_data_dir}/urls.txt' , """w""" ) as fa, open(
F'{class_data_dir}/images.txt' , """w""" ) as fa:
while total < num_class_images:
snake_case_ : int = class_images[count]
count += 1
try:
snake_case_ : List[Any] = requests.get(images["""url"""] )
if img.status_code == 2_0_0:
snake_case_ : List[str] = Image.open(BytesIO(img.content ) )
with open(F'{class_data_dir}/images/{total}.jpg' , """wb""" ) as f:
f.write(img.content )
fa.write(images["""caption"""] + """\n""" )
fa.write(images["""url"""] + """\n""" )
fa.write(F'{class_data_dir}/images/{total}.jpg' + """\n""" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[Any] = argparse.ArgumentParser("""""" , add_help=__UpperCamelCase )
parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=__UpperCamelCase , type=__UpperCamelCase )
parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=__UpperCamelCase , type=__UpperCamelCase )
parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=2_0_0 , type=__UpperCamelCase )
return parser.parse_args()
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 21 |
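The script above is driven entirely by its argparse flags; a typical invocation might look like the following (script name and paths are hypothetical):

# python retrieve.py --class_prompt "photo of a dog" \
#     --class_data_dir ./real_reg/dog --num_class_images 200
#
# Captions, source URLs and local image paths are written to caption.txt,
# urls.txt and images.txt inside --class_data_dir.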
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
__lowerCAmelCase : Optional[Any] = parser.parse_args()
__lowerCAmelCase : Dict = '''cpu'''
__lowerCAmelCase : Optional[Any] = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
__lowerCAmelCase : Tuple = '''path-to-your-trained-model'''
__lowerCAmelCase : List[Any] = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
__lowerCAmelCase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
__lowerCAmelCase : List[Any] = pipe.to(device)
# to channels last
__lowerCAmelCase : Optional[Any] = pipe.unet.to(memory_format=torch.channels_last)
__lowerCAmelCase : List[str] = pipe.vae.to(memory_format=torch.channels_last)
__lowerCAmelCase : Optional[Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
__lowerCAmelCase : Dict = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
__lowerCAmelCase : Tuple = torch.randn(2, 4, 64, 64)
__lowerCAmelCase : Any = torch.rand(1) * 999
__lowerCAmelCase : List[str] = torch.randn(2, 77, 768)
__lowerCAmelCase : Optional[int] = (sample, timestep, encoder_hidden_status)
try:
    __lowerCAmelCase : List[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    __lowerCAmelCase : Optional[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
__lowerCAmelCase : Any = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
__lowerCAmelCase : int = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    __lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
__lowerCAmelCase : List[str] = 666
__lowerCAmelCase : Optional[int] = torch.Generator(device).manual_seed(seed)
__lowerCAmelCase : List[Any] = {'''generator''': generator}
if args.steps is not None:
__lowerCAmelCase : Any = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
__lowerCAmelCase : str = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
| 21 | 1 |
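The same channels-last plus `ipex.optimize` recipe on a self-contained toy module, as a sketch (requires intel_extension_for_pytorch; bfloat16 autocast runs on CPU):

import intel_extension_for_pytorch as ipex
import torch

model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.ReLU()).eval()
model = model.to(memory_format=torch.channels_last)
model = ipex.optimize(model, dtype=torch.bfloat16, inplace=True)

x = torch.randn(1, 3, 64, 64).to(memory_format=torch.channels_last)
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    y = model(x)
print(y.shape)  # torch.Size([1, 8, 62, 62])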
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = None
# Automatically constructed
_lowerCamelCase = "dict"
_lowerCamelCase = None
_lowerCamelCase = field(default='''Translation''' , init=SCREAMING_SNAKE_CASE__ , repr=SCREAMING_SNAKE_CASE__ )
def __call__( self ) -> List[Any]:
'''simple docstring'''
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def UpperCAmelCase__ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
'''simple docstring'''
from .features import Value
return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = None
_lowerCamelCase = None
_lowerCamelCase = None
# Automatically constructed
_lowerCamelCase = "dict"
_lowerCamelCase = None
_lowerCamelCase = field(default='''TranslationVariableLanguages''' , init=SCREAMING_SNAKE_CASE__ , repr=SCREAMING_SNAKE_CASE__ )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Tuple = sorted(set(self.languages ) ) if self.languages else None
snake_case_ : Optional[int] = len(self.languages ) if self.languages else None
def __call__( self ) -> str:
'''simple docstring'''
return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )
def UpperCAmelCase__ ( self , _lowercase ) -> Tuple:
'''simple docstring'''
snake_case_ : int = set(self.languages )
if self.languages and set(_lowercase ) - lang_set:
raise ValueError(
f'Some languages in example ({", ".join(sorted(set(_lowercase ) - lang_set ) )}) are not in valid set ({", ".join(_lowercase )}).' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
snake_case_ : List[Any] = []
for lang, text in translation_dict.items():
if isinstance(_lowercase , _lowercase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
snake_case_ , snake_case_ : str = zip(*sorted(_lowercase ) )
return {"language": languages, "translation": translations}
def UpperCAmelCase__ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
'''simple docstring'''
from .features import Sequence, Value
return {
"language": Sequence(Value("""string""" ) ),
"translation": Sequence(Value("""string""" ) ),
}
| 21 |
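How these two feature types behave in a real schema; a small example with the standard datasets API (the data itself is illustrative):

from datasets import Dataset, Features, Value
from datasets.features import Translation, TranslationVariableLanguages

features = Features(
    {
        "id": Value("string"),
        "fixed": Translation(languages=["en", "fr"]),
        "variable": TranslationVariableLanguages(languages=["de", "en", "fr"]),
    }
)
ds = Dataset.from_dict(
    {
        "id": ["0"],
        "fixed": [{"en": "the cat", "fr": "le chat"}],
        "variable": [{"en": "the cat", "fr": ["le chat", "la chatte"]}],
    },
    features=features,
)
print(ds[0]["variable"])  # flattened into parallel language/translation lists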
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = RoFormerTokenizer
_lowerCamelCase = RoFormerTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = True
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().setUp()
def UpperCAmelCase__ ( self , **_lowercase ) -> str:
'''simple docstring'''
return self.tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **_lowercase )
def UpperCAmelCase__ ( self , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **_lowercase )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : Tuple = """永和服装饰品有限公司,今天天气非常好"""
snake_case_ : int = """永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"""
return input_text, output_text
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : List[str] = self.get_tokenizer()
snake_case_ , snake_case_ : Optional[Any] = self.get_chinese_input_output_texts()
snake_case_ : List[str] = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , output_text.split() )
snake_case_ : str = tokens + [tokenizer.unk_token]
snake_case_ : Tuple = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : int = self.get_rust_tokenizer()
snake_case_ , snake_case_ : List[Any] = self.get_chinese_input_output_texts()
snake_case_ : Union[str, Any] = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , output_text.split() )
snake_case_ : Optional[int] = tokens + [tokenizer.unk_token]
snake_case_ : Union[str, Any] = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
| 21 | 1 |
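A quick usage sketch of the tokenizer these tests exercise (downloads the vocabulary and needs the rjieba package for the Chinese pre-tokenization):

from transformers import RoFormerTokenizer

tokenizer = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
tokens = tokenizer.tokenize("今天天气非常好")
print(tokens)
print(tokenizer.convert_tokens_to_ids(tokens))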
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = (IPNDMScheduler,)
_lowerCamelCase = (('''num_inference_steps''', 50),)
def UpperCAmelCase__ ( self , **_lowercase ) -> str:
'''simple docstring'''
snake_case_ : Tuple = {"""num_train_timesteps""": 1_0_0_0}
config.update(**_lowercase )
return config
def UpperCAmelCase__ ( self , _lowercase=0 , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Dict = dict(self.forward_default_kwargs )
snake_case_ : Union[str, Any] = kwargs.pop("""num_inference_steps""" , _lowercase )
snake_case_ : List[Any] = self.dummy_sample
snake_case_ : Optional[Any] = 0.1 * sample
snake_case_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
snake_case_ : int = self.get_scheduler_config(**_lowercase )
snake_case_ : Union[str, Any] = scheduler_class(**_lowercase )
scheduler.set_timesteps(_lowercase )
# copy over dummy past residuals
snake_case_ : str = dummy_past_residuals[:]
if time_step is None:
snake_case_ : Union[str, Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase )
snake_case_ : int = scheduler_class.from_pretrained(_lowercase )
new_scheduler.set_timesteps(_lowercase )
# copy over dummy past residuals
snake_case_ : Optional[Any] = dummy_past_residuals[:]
snake_case_ : Dict = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
snake_case_ : Dict = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
snake_case_ : Union[str, Any] = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
snake_case_ : Dict = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self , _lowercase=0 , **_lowercase ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = dict(self.forward_default_kwargs )
snake_case_ : Optional[int] = kwargs.pop("""num_inference_steps""" , _lowercase )
snake_case_ : int = self.dummy_sample
snake_case_ : Dict = 0.1 * sample
snake_case_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
snake_case_ : List[Any] = self.get_scheduler_config()
snake_case_ : Dict = scheduler_class(**_lowercase )
scheduler.set_timesteps(_lowercase )
# copy over dummy past residuals (must be after setting timesteps)
snake_case_ : int = dummy_past_residuals[:]
if time_step is None:
snake_case_ : Optional[Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase )
snake_case_ : Optional[Any] = scheduler_class.from_pretrained(_lowercase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowercase )
# copy over dummy past residual (must be after setting timesteps)
snake_case_ : List[str] = dummy_past_residuals[:]
snake_case_ : Tuple = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
snake_case_ : Optional[Any] = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
snake_case_ : Any = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
snake_case_ : Any = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self , **_lowercase ) -> int:
'''simple docstring'''
snake_case_ : Dict = self.scheduler_classes[0]
snake_case_ : List[str] = self.get_scheduler_config(**_lowercase )
snake_case_ : Tuple = scheduler_class(**_lowercase )
snake_case_ : Optional[Any] = 1_0
snake_case_ : List[Any] = self.dummy_model()
snake_case_ : List[str] = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase )
for i, t in enumerate(scheduler.timesteps ):
snake_case_ : List[str] = model(_lowercase , _lowercase )
snake_case_ : int = scheduler.step(_lowercase , _lowercase , _lowercase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
snake_case_ : str = model(_lowercase , _lowercase )
snake_case_ : Optional[Any] = scheduler.step(_lowercase , _lowercase , _lowercase ).prev_sample
return sample
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = dict(self.forward_default_kwargs )
snake_case_ : Dict = kwargs.pop("""num_inference_steps""" , _lowercase )
for scheduler_class in self.scheduler_classes:
snake_case_ : Tuple = self.get_scheduler_config()
snake_case_ : Dict = scheduler_class(**_lowercase )
snake_case_ : str = self.dummy_sample
snake_case_ : List[str] = 0.1 * sample
if num_inference_steps is not None and hasattr(_lowercase , """set_timesteps""" ):
scheduler.set_timesteps(_lowercase )
elif num_inference_steps is not None and not hasattr(_lowercase , """set_timesteps""" ):
snake_case_ : str = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
snake_case_ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
snake_case_ : int = dummy_past_residuals[:]
snake_case_ : Optional[int] = scheduler.timesteps[5]
snake_case_ : Dict = scheduler.timesteps[6]
snake_case_ : List[str] = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
snake_case_ : Optional[int] = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
snake_case_ : Optional[Any] = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
snake_case_ : Union[str, Any] = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
for timesteps in [1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=_lowercase , time_step=_lowercase )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0] ):
self.check_over_forward(num_inference_steps=_lowercase , time_step=_lowercase )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = self.full_loop()
snake_case_ : Tuple = torch.mean(torch.abs(_lowercase ) )
assert abs(result_mean.item() - 2_5_4_0_5_2_9 ) < 1_0
| 21 |
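The config save/reload round trip exercised by the tests above, as a minimal sketch with the standard diffusers API:

import tempfile

import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
with tempfile.TemporaryDirectory() as tmpdir:
    scheduler.save_config(tmpdir)
    reloaded = IPNDMScheduler.from_pretrained(tmpdir)
reloaded.set_timesteps(10)
assert torch.equal(scheduler.timesteps, reloaded.timesteps)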
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 1_0 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
snake_case_ : List[Any] = [
2_0_4_7,
1_3_7_3_6_5_3,
2_5_3_2_6_0_0_1,
3_2_1_5_0_3_1_7_5_1,
2_1_5_2_3_0_2_8_9_8_7_4_7,
3_4_7_4_7_4_9_6_6_0_3_8_3,
3_4_1_5_5_0_0_7_1_7_2_8_3_2_1,
1,
3_8_2_5_1_2_3_0_5_6_5_4_6_4_1_3_0_5_1,
1,
1,
3_1_8_6_6_5_8_5_7_8_3_4_0_3_1_1_5_1_1_6_7_4_6_1,
3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1,
]
snake_case_ : Dict = [2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9, 3_1, 3_7, 4_1]
for idx, _p in enumerate(__UpperCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
snake_case_ : Optional[int] = primes[:idx]
break
snake_case_ , snake_case_ : Tuple = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
snake_case_ : List[str] = False
for r in range(__UpperCamelCase ):
snake_case_ : int = pow(__UpperCamelCase , d * 2**r , __UpperCamelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
snake_case_ : Optional[Any] = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def __lowerCAmelCase ( ):
'''simple docstring'''
assert not miller_rabin(5_6_1 )
assert miller_rabin(5_6_3 )
# 2047
assert not miller_rabin(8_3_8_2_0_1 )
assert miller_rabin(8_3_8_2_0_7 )
# 1_373_653
assert not miller_rabin(1_7_3_1_6_0_0_1 )
assert miller_rabin(1_7_3_1_6_0_1_7 )
# 25_326_001
assert not miller_rabin(3_0_7_8_3_8_6_6_4_1 )
assert miller_rabin(3_0_7_8_3_8_6_6_5_3 )
# 3_215_031_751
assert not miller_rabin(1_7_1_3_0_4_5_5_7_4_8_0_1 )
assert miller_rabin(1_7_1_3_0_4_5_5_7_4_8_1_9 )
# 2_152_302_898_747
assert not miller_rabin(2_7_7_9_7_9_9_7_2_8_3_0_7 )
assert miller_rabin(2_7_7_9_7_9_9_7_2_8_3_2_7 )
# 3_474_749_660_383
assert not miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_4_4_1 )
assert miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_5_2_7 )
# 341_550_071_728_321
assert not miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_5_1 )
assert miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_9_1 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_8_6_7 )
assert miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_9_5_1 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_3_3 )
assert miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_5_9 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 21 | 1 |
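The heart of the test above is the n - 1 = d * 2**s decomposition and the repeated-squaring witness check; a self-contained sketch with a known composite:

n = 221  # 13 * 17, composite
d, s = n - 1, 0
while d % 2 == 0:
    d //= 2
    s += 1
assert d * 2**s == n - 1  # here d = 55, s = 2

witness = 2
values = [pow(witness, d * 2**r, n) for r in range(s)]
print(values)  # [128, 30]: never 1 at r == 0 and never n - 1, so 221 is composite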
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCAmelCase : Optional[Any] = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[str] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 21 |
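What the `_LazyModule` indirection above buys: importing the package is cheap, and the heavy submodule only loads when a name is first touched. A one-liner sketch with the public transformers API:

import transformers

config_cls = transformers.Wav2Vec2Config  # attribute access triggers the real import
print(config_cls.model_type)              # 'wav2vec2'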
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
def is_in_circle(__UpperCamelCase : float , __UpperCamelCase : float ) -> bool:
snake_case_ : Dict = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
snake_case_ : Tuple = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(__UpperCamelCase ) )
# The ratio of the area for circle to square is pi/4.
snake_case_ : Union[str, Any] = proportion * 4
print(F'The estimated value of pi is {pi_estimate}' )
print(F'The numpy value of pi is {pi}' )
print(F'The total error is {abs(pi - pi_estimate )}' )
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Callable[[float], float] , __UpperCamelCase : float = 0.0 , __UpperCamelCase : float = 1.0 , ):
'''simple docstring'''
return mean(
function_to_integrate(uniform(__UpperCamelCase , __UpperCamelCase ) ) for _ in range(__UpperCamelCase ) ) * (max_value - min_value)
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : float = 0.0 , __UpperCamelCase : float = 1.0 ):
'''simple docstring'''
def identity_function(__UpperCamelCase : float ) -> float:
return x
snake_case_ : int = area_under_curve_estimator(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ : str = (max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {expected_value}' )
print(F'Total error is {abs(estimated_value - expected_value )}' )
print("""******************""" )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
def function_to_integrate(__UpperCamelCase : float ) -> float:
return sqrt(4.0 - x * x )
snake_case_ : List[Any] = area_under_curve_estimator(
__UpperCamelCase , __UpperCamelCase , 0.0 , 2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {pi}' )
print(F'Total error is {abs(estimated_value - pi )}' )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 1 |
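A quick numeric sanity check of the estimator pattern above: the Monte Carlo mean of f over an interval, scaled by the interval length, approximates the integral (pure standard library; the integral of x**2 over [0, 1] is 1/3):

from random import uniform
from statistics import mean

estimate = mean(uniform(0.0, 1.0) ** 2 for _ in range(100_000)) * (1.0 - 0.0)
print(estimate)  # close to 0.3333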
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
__lowerCAmelCase : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
__lowerCAmelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase]
__lowerCAmelCase : set[int] = {ord(char) for char in VALID_CHARS}
__lowerCAmelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def __lowerCAmelCase ( __UpperCamelCase : list[int] , __UpperCamelCase : tuple[int, ...] ):
'''simple docstring'''
snake_case_ : str = ""
snake_case_ : int
snake_case_ : int
snake_case_ : int
for keychar, cipherchar in zip(cycle(__UpperCamelCase ) , __UpperCamelCase ):
snake_case_ : List[str] = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(__UpperCamelCase )
return decoded
def __lowerCAmelCase ( __UpperCamelCase : list[int] ):
'''simple docstring'''
snake_case_ : list[str] = []
for key in product(__UpperCamelCase , repeat=3 ):
snake_case_ : Dict = try_key(__UpperCamelCase , __UpperCamelCase )
if encoded is not None:
possibles.append(__UpperCamelCase )
return possibles
def __lowerCAmelCase ( __UpperCamelCase : list[str] , __UpperCamelCase : str ):
'''simple docstring'''
return [possible for possible in possibles if common_word in possible.lower()]
def __lowerCAmelCase ( __UpperCamelCase : str = "p059_cipher.txt" ):
'''simple docstring'''
snake_case_ : list[int]
snake_case_ : list[str]
snake_case_ : str
snake_case_ : str
snake_case_ : str = Path(__UpperCamelCase ).parent.joinpath(__UpperCamelCase ).read_text(encoding="""utf-8""" )
snake_case_ : int = [int(__UpperCamelCase ) for number in data.strip().split(""",""" )]
snake_case_ : Tuple = filter_valid_chars(__UpperCamelCase )
for common_word in COMMON_WORDS:
snake_case_ : Optional[int] = filter_common_word(__UpperCamelCase , __UpperCamelCase )
if len(__UpperCamelCase ) == 1:
break
snake_case_ : Optional[int] = possibles[0]
return sum(ord(__UpperCamelCase ) for char in decoded_text )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 21 |
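The XOR round trip the solver above depends on: applying the same repeating key twice returns the plaintext (pure standard library; key and message are illustrative):

from itertools import cycle

key = (ord("g"), ord("o"), ord("d"))
plain = "the quick brown fox"
cipher = [ord(c) ^ k for c, k in zip(plain, cycle(key))]
decoded = "".join(chr(c ^ k) for c, k in zip(cipher, cycle(key)))
assert decoded == plain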
"""simple docstring"""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = nn.functional.normalize(__UpperCamelCase )
snake_case_ : Tuple = nn.functional.normalize(__UpperCamelCase )
return torch.mm(__UpperCamelCase , normalized_text_embeds.t() )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = CLIPConfig
_lowerCamelCase = ['''CLIPEncoderLayer''']
def __init__( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Tuple = CLIPVisionModel(config.vision_config )
snake_case_ : int = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_lowercase )
snake_case_ : Optional[Any] = nn.Parameter(torch.ones(1_7 , config.projection_dim ) , requires_grad=_lowercase )
snake_case_ : Dict = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=_lowercase )
snake_case_ : Any = nn.Parameter(torch.ones(1_7 ) , requires_grad=_lowercase )
snake_case_ : List[str] = nn.Parameter(torch.ones(3 ) , requires_grad=_lowercase )
@torch.no_grad()
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
snake_case_ : int = self.vision_model(_lowercase )[1] # pooled_output
snake_case_ : str = self.visual_projection(_lowercase )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
snake_case_ : Dict = cosine_distance(_lowercase , self.special_care_embeds ).cpu().float().numpy()
snake_case_ : List[str] = cosine_distance(_lowercase , self.concept_embeds ).cpu().float().numpy()
snake_case_ : Any = []
snake_case_ : Any = image_embeds.shape[0]
for i in range(_lowercase ):
snake_case_ : List[Any] = {"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []}
            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
snake_case_ : int = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
snake_case_ : List[str] = special_cos_dist[i][concept_idx]
snake_case_ : Union[str, Any] = self.special_care_embeds_weights[concept_idx].item()
snake_case_ : Tuple = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} )
snake_case_ : Dict = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
snake_case_ : int = cos_dist[i][concept_idx]
snake_case_ : List[Any] = self.concept_embeds_weights[concept_idx].item()
snake_case_ : List[str] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(_lowercase )
result.append(_lowercase )
snake_case_ : Union[str, Any] = [len(res["""bad_concepts"""] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = self.vision_model(_lowercase )[1] # pooled_output
snake_case_ : List[str] = self.visual_projection(_lowercase )
snake_case_ : str = cosine_distance(_lowercase , self.special_care_embeds )
snake_case_ : Optional[int] = cosine_distance(_lowercase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
snake_case_ : Tuple = 0.0
snake_case_ : List[Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
snake_case_ : str = torch.any(special_scores > 0 , dim=1 )
snake_case_ : List[str] = special_care * 0.01
snake_case_ : Optional[int] = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
snake_case_ : Optional[Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
snake_case_ : str = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
| 21 | 1 |
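The `cosine_distance` helper above is really a similarity matrix between L2-normalized rows; a tiny self-contained check:

import torch
import torch.nn as nn

image_embeds = nn.functional.normalize(torch.randn(2, 8))
text_embeds = nn.functional.normalize(torch.randn(3, 8))
similarity = torch.mm(image_embeds, text_embeds.t())
print(similarity.shape)  # (2, 3): one score per image/concept pair
assert similarity.abs().max() <= 1.0 + 1e-6  # unit vectors bound the dot product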
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> str:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> str:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Any:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> str:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Any:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Any:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> str:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Any:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
| 21 |
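What these placeholder classes do in practice: instantiating one without sentencepiece installed raises an ImportError that explains the missing backend. A minimal sketch with a hypothetical stand-in class:

from transformers.utils import DummyObject, requires_backends

class DummySentencePieceTokenizer(metaclass=DummyObject):  # hypothetical stand-in
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])

try:
    DummySentencePieceTokenizer()
except ImportError as err:  # only triggers when sentencepiece is absent
    print(err)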
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : List[str] = []
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
F'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
F'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
F'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
F'stage{idx}.patch_embed.norm.bias',
) )
return embed
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : str = []
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
F'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
F'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', F'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', F'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', F'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', F'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
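# The rename helpers (attention above, cls_token/final below) each return a list of
# (huggingface_key, original_checkpoint_key) pairs; the conversion loop later copies
# original_weights[original_key] into the new state dict under huggingface_key.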
def cls_token(idx):
    token = []
    # Only the last stage of CvT carries a cls token; the original checkpoint stores it under stage2.
    token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
    return token


def final():
    head = []
    head.append(("layernorm.weight", "norm.weight"))
    head.append(("layernorm.bias", "norm.bias"))
    head.append(("classifier.weight", "head.weight"))
    head.append(("classifier.bias", "head.bias"))
    return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    """Build a CvtForImageClassification model and fill it with the renamed original weights."""
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"

    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (24 = 2+2+20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
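# Example invocation (illustrative script/paths; the checkpoint comes from the zoo link above):
#   python convert_cvt_original_pytorch_checkpoint_to_pytorch.py \
#       --cvt_model cvt-w24 --image_size 384 \
#       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24-converted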
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
    parser.add_argument(
        '''--cvt_file_name''',
        default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
        type=str,
        help='''Path to the original CvT checkpoint (.pth) file.''',
    )
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__lowerCAmelCase : Dict = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 21 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_trocr'''] = [
        '''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TrOCRForCausalLM''',
        '''TrOCRPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
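# Sketch of the intended effect: with the lazy module registered, an import such as
#   from transformers.models.trocr import TrOCRProcessor
# resolves on first attribute access, so the heavy modeling code is not loaded eagerly.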
| 21 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)

                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
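# Illustrative behaviour (not asserted above): MGP-STR tokenizes character by character, so
# tokenizer.tokenize("tester") yields ["t", "e", "s", "t", "e", "r"], one vocab id per character.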
| 21 | 1 |
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : int = 1_0
snake_case_ : Any = datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
snake_case_ : Tuple = datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [9_7], """text""": ["""1976"""]}] * 1_0,
"""id""": list(range(__UpperCamelCase ) ),
} , features=__UpperCamelCase , )
return dataset
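# Sketch of how a test consumes this session fixture: pytest injects the materialized Dataset
# by the fixture's function name, e.g.
#   def test_num_rows(dataset): assert dataset.num_rows == 10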
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : str = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=__UpperCamelCase )
return filename
# FILE_CONTENT + files
__lowerCAmelCase : List[Any] = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope="""session""" )
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename


@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path


@pytest.fixture(scope="session")
def tar_file(tmp_path_factory, text_file):
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_file(tmp_path_factory, text_file):
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Dict = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
snake_case_ : List[str] = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase )
return filename
DATA = [
    {'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
    {'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
    {'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
    {'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
DATA2 = [
    {'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
    {'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
DATA_DICT_OF_LISTS = {
    '''col_1''': ['''0''', '''1''', '''2''', '''3'''],
    '''col_2''': [0, 1, 2, 3],
    '''col_3''': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
    {'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
DATA_STR = [
    {'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
    {'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
    {'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
    {'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Optional[int] = datasets.Dataset.from_dict(__UpperCamelCase )
snake_case_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
    with contextlib.closing(sqlite3.connect(__UpperCamelCase ) ) as con:
snake_case_ : Tuple = con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
con.commit()
return path
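# Reading the fixture back (sketch, assuming the path returned above):
#   with contextlib.closing(sqlite3.connect(path)) as con:
#       rows = con.execute("SELECT * FROM dataset").fetchall()  # the four DATA rows as tuples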
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(__UpperCamelCase , """w""" , newline="""""" ) as f:
snake_case_ : Optional[Any] = csv.DictWriter(__UpperCamelCase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(__UpperCamelCase , """w""" , newline="""""" ) as f:
snake_case_ : str = csv.DictWriter(__UpperCamelCase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : int ):
'''simple docstring'''
    import bz2
snake_case_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(__UpperCamelCase , """rb""" ) as f:
snake_case_ : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(__UpperCamelCase , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Any = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
snake_case_ : Any = pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(__UpperCamelCase , """wb""" ) as f:
snake_case_ : Optional[int] = pq.ParquetWriter(__UpperCamelCase , schema=__UpperCamelCase )
snake_case_ : Optional[int] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__UpperCamelCase ) )] for k in DATA[0]} , schema=__UpperCamelCase )
writer.write_table(__UpperCamelCase )
writer.close()
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
snake_case_ : Any = {"""data""": DATA}
with open(__UpperCamelCase , """w""" ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
snake_case_ : List[Any] = {"""data""": DATA_DICT_OF_LISTS}
with open(__UpperCamelCase , """w""" ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
import gzip
snake_case_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(__UpperCamelCase , """rb""" ) as orig_file:
with gzip.open(__UpperCamelCase , """wb""" ) as zipped_file:
zipped_file.writelines(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[str] ):
'''simple docstring'''
import gzip
snake_case_ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(__UpperCamelCase , """rb""" ) as orig_file:
with gzip.open(__UpperCamelCase , """wb""" ) as zipped_file:
zipped_file.writelines(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : List[str] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""nested""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(__UpperCamelCase , arcname=os.path.join("""nested""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : str = ["""0""", """1""", """2""", """3"""]
snake_case_ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : int = ["""0""", """1""", """2""", """3"""]
snake_case_ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : List[Any] = ["""0""", """1""", """2""", """3"""]
snake_case_ : str = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(__UpperCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(__UpperCamelCase , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : List[Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
snake_case_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
return data_dir
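# The hidden file and hidden directory above exist on purpose: data loaders that glob this
# directory are expected to skip dot-prefixed entries, which downstream tests rely on.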
| 21 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )

        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
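# The @slow tests above are skipped by default; in this test suite they are enabled with the
# RUN_SLOW=1 environment variable, e.g. (illustrative): RUN_SLOW=1 python -m pytest -k roformer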
| 21 | 1 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_compatible_try_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_try_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_compatible_extension_type_with_try(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type_with_try(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            _ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])
def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Optional[int] = pa.BufferOutputStream()
snake_case_ : List[Any] = pa.schema(__UpperCamelCase ) if fields else None
with ArrowWriter(stream=__UpperCamelCase , schema=__UpperCamelCase , writer_batch_size=__UpperCamelCase ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
snake_case_ , snake_case_ : Optional[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case_ : Tuple = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Union[str, Any] = pa.BufferOutputStream()
snake_case_ : Optional[Any] = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} )
with ArrowWriter(stream=__UpperCamelCase , features=__UpperCamelCase ) as writer:
writer.write({"""labels""": 0} )
writer.write({"""labels""": 1} )
snake_case_ , snake_case_ : int = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
snake_case_ : int = pa.BufferReader(output.getvalue() )
snake_case_ : Optional[int] = pa.ipc.open_stream(__UpperCamelCase )
snake_case_ : pa.Table = f.read_all()
snake_case_ : Optional[int] = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(__UpperCamelCase )
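# Note on expected_num_chunks in the tests above: with writer_batch_size=1 the writer flushes one
# record batch per example, so two writes yield two chunks; larger batch sizes coalesce them into one.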
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
def test_key_datatype(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True,
    ) as writer:
        with pytest.raises(InvalidKeyError):
            writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_duplicate_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True,
    ) as writer:
        with pytest.raises(DuplicatedKeysError):
            writer.write({"col_1": "foo", "col_2": 1}, key=10)
            writer.write({"col_1": "bar", "col_2": 2}, key=10)
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_write_with_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True,
    ) as writer:
        writer.write({"col_1": "foo", "col_2": 1}, key=1)
        writer.write({"col_1": "bar", "col_2": 2}, key=2)
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = pa.BufferOutputStream()
snake_case_ : int = pa.schema(__UpperCamelCase ) if fields else None
with ArrowWriter(stream=__UpperCamelCase , schema=__UpperCamelCase , writer_batch_size=__UpperCamelCase ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
writer.write_batch({"""col_1""": [], """col_2""": []} )
snake_case_ , snake_case_ : List[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case_ : Tuple = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Union[str, Any] = pa.BufferOutputStream()
snake_case_ : str = pa.schema(__UpperCamelCase ) if fields else None
with ArrowWriter(stream=__UpperCamelCase , schema=__UpperCamelCase , writer_batch_size=__UpperCamelCase ) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
snake_case_ , snake_case_ : Dict = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case_ : List[str] = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : str = pa.BufferOutputStream()
snake_case_ : Optional[int] = pa.schema(__UpperCamelCase ) if fields else None
with ArrowWriter(stream=__UpperCamelCase , schema=__UpperCamelCase , writer_batch_size=__UpperCamelCase ) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
snake_case_ , snake_case_ : Optional[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
snake_case_ : List[Any] = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_file():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)


def get_base_dtype(arr_type):
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : str = pa.array(TypedSequence(__UpperCamelCase , optimized_int_type=__UpperCamelCase ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"""col, expected_dtype""" , [
("""attention_mask""", pa.inta()),
("""special_tokens_mask""", pa.inta()),
("""token_type_ids""", pa.inta()),
("""input_ids""", pa.intaa()),
("""other""", pa.intaa()),
] , )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : List[str] = pa.array(OptimizedTypedSequence(__UpperCamelCase , col=__UpperCamelCase ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
snake_case_ : Any = copy.deepcopy(__UpperCamelCase )
snake_case_ : List[str] = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(__UpperCamelCase , __UpperCamelCase )
snake_case_ : str = pa.array(OptimizedTypedSequence(__UpperCamelCase , col=__UpperCamelCase ) )
assert get_base_dtype(arr.type ) == pa.intaa()
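# Takeaway from the test above: OptimizedTypedSequence downcasts well-known columns (e.g. int8
# for attention_mask) but falls back to int64 once a value exceeds the optimized dtype's range.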
@pytest.mark.parametrize("""raise_exception""" , [False, True] )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : List[str] = str(tmp_path / """dataset-train.arrow""" )
try:
with ArrowWriter(path=__UpperCamelCase ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def __lowerCAmelCase ( __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : List[Any] = """mock://dataset-train.arrow"""
with ArrowWriter(path=__UpperCamelCase , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(__UpperCamelCase ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
snake_case_ , snake_case_ : int = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(__UpperCamelCase )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Union[str, Any] = pa.BufferOutputStream()
with ParquetWriter(stream=__UpperCamelCase ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
snake_case_ , snake_case_ : str = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
snake_case_ : Optional[int] = pa.BufferReader(output.getvalue() )
snake_case_ : pa.Table = pq.read_table(__UpperCamelCase )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None


def test_always_nullable():
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])

    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)

    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
| 21 |
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : int = 1_0
snake_case_ : Any = datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
snake_case_ : Tuple = datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [9_7], """text""": ["""1976"""]}] * 1_0,
"""id""": list(range(__UpperCamelCase ) ),
} , features=__UpperCamelCase , )
return dataset
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : str = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=__UpperCamelCase )
return filename
# FILE_CONTENT + files
__lowerCAmelCase : List[Any] = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
snake_case_ : Optional[Any] = FILE_CONTENT
with open(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase )
return filename
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
    import bz2
snake_case_ : Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
snake_case_ : Any = bytes(__UpperCamelCase , """utf-8""" )
    with bz2.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
import gzip
snake_case_ : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
snake_case_ : List[Any] = bytes(__UpperCamelCase , """utf-8""" )
with gzip.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
snake_case_ : Optional[Any] = bytes(__UpperCamelCase , """utf-8""" )
        with lz4.frame.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
        with py7zr.SevenZipFile(__UpperCamelCase , """w""" ) as archive:
archive.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple ):
'''simple docstring'''
import tarfile
snake_case_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
import lzma
snake_case_ : str = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
snake_case_ : str = bytes(__UpperCamelCase , """utf-8""" )
with lzma.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
import zipfile
snake_case_ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
snake_case_ : Tuple = bytes(__UpperCamelCase , """utf-8""" )
with zstd.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
    with open(filename, "w") as f:
        f.write(data)
    return filename
DATA = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
DATA2 = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
DATA_DICT_OF_LISTS = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
DATA_STR = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope="session")
def dataset_dict():
    return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path
@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    import sqlite3

    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path
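# Hedged usage sketch (illustrative only, not part of the original fixtures):
# a test taking the `sqlite_path` fixture above could read the table back.
def example_read_sqlite(sqlite_path):
    import sqlite3

    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        rows = con.execute("SELECT col_1, col_2, col_3 FROM dataset").fetchall()
    assert len(rows) == len(DATA)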
@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path
@pytest.fixture(scope="session")
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
    return path
@pytest.fixture(scope="session")
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
    return path
@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
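# Hedged sketch (assumed usage, not from the original file): the parquet
# fixture can be verified by reading the table back with pyarrow.
def example_read_parquet(parquet_path):
    table = pq.read_table(parquet_path)
    assert table.num_rows == len(DATA)
    assert table.column_names == ["col_1", "col_2", "col_3"]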
@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path
@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path
@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path
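# Hedged sketch (illustrative, not in the original file): a consumer of the
# `jsonl_path` fixture can parse it back line by line.
def example_read_jsonl(jsonl_path):
    with open(jsonl_path) as f:
        items = [json.loads(line) for line in f]
    assert items == DATA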
@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path
@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path
@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path
@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path
@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path
@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path
@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path
@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def abc_file(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path
@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path
@pytest.fixture(scope="session")
def zip_unsupported_ext_path(abc_file, text_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(abc_file, arcname=os.path.basename("unsupported.ext"))
        f.write(text_path, arcname=os.path.basename("unsupported_2.ext"))
    return path
@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path
@pytest.fixture(scope="session")
def image_file():
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")
@pytest.fixture(scope="session")
def audio_file():
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")
@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path
@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp("data_dir")
    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    return data_dir
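# Hedged sketch (illustrative): the hidden entries written above sit next to
# the visible ones and can be checked with pathlib.
def example_hidden_files(data_dir_with_hidden_files):
    names = sorted(p.name for p in (data_dir_with_hidden_files / "subdir").iterdir())
    assert names == [".test.txt", "test.txt", "train.txt"]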
| 21 | 1 |
"""simple docstring"""
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)
def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")
def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")
def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)
def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")
def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)
def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
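    # Hedged usage sketch (example data chosen here, not from the source): a
    # classic two-state weather HMM; viterbi recovers the most likely states,
    # which for these observations is ["Healthy", "Healthy", "Fever"].
    observations = ["normal", "cold", "dizzy"]
    states = ["Healthy", "Fever"]
    start_p = {"Healthy": 0.6, "Fever": 0.4}
    trans_p = {
        "Healthy": {"Healthy": 0.7, "Fever": 0.3},
        "Fever": {"Healthy": 0.4, "Fever": 0.6},
    }
    emit_p = {
        "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    print(viterbi(observations, states, start_p, trans_p, emit_p))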
| 21 |
"""simple docstring"""
def binary_xor(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
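    # Hedged usage sketch (example values chosen here, not from the source):
    # 25 = 0b011001 and 32 = 0b100000, so the bitwise XOR is 0b111001.
    print(binary_xor(25, 32))  # -> "0b111001"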
| 21 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")
    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer.model_max_length = 1024
        return tokenizer
@require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)
    @require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)
    @require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weater?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)
        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                additional_special_tokens = tokenizer.additional_special_tokens
                # We can add a new special token for Canine as follows:
                new_additional_special_token = chr(0xE007)
                additional_special_tokens.append(new_additional_special_token)
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, ids = self.get_clean_sequence(tokenizer)
                # a special token for Canine can be defined as follows:
                SPECIAL_TOKEN = 0xE005
                special_token = chr(SPECIAL_TOKEN)
                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)
                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, input_encoded + special_token_id)
                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_tokenize_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)
                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]})
                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)
                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)
@require_tokenizers
    def test_added_token_serializable(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token = chr(NEW_TOKEN)
                new_token = AddedToken(new_token, lstrip=True)
                tokenizer.add_special_tokens({"additional_special_tokens": [new_token]})
                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token_1 = chr(NEW_TOKEN)
                special_tokens_map["additional_special_tokens"] = [new_token_1]
                tokenizer_config["additional_special_tokens"] = [new_token_1]
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_1],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])
                    ),
                )
                NEW_TOKEN = 0xE007
                new_token_2 = chr(NEW_TOKEN)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_2, lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0
                )
                self.assertIn(new_token_2, tokenizer.additional_special_tokens)
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2]))
                )
@require_tokenizers
    def test_encode_decode_with_spaces(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input = "hello world"
                if self.space_between_special_tokens:
                    output = "[CLS] hello world [SEP]"
                else:
                    output = input
                encoded = tokenizer.encode(input, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]
                token_to_test_setters = "a"
                token_id_to_test_setters = ord(token_to_test_setters)
                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)
                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)
                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])
                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, "additional_special_tokens_ids", [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [additional_special_token])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [additional_special_token_id])
    # The following overrides turn common-mixin tests into no-ops: CANINE has
    # no conventional vocabulary, so these checks do not apply.
    def test_add_tokens_tokenizer(self):
        pass
    def test_added_tokens_do_lower_case(self):
        pass
    def test_np_encode_plus_sent_to_model(self):
        pass
    def test_torch_encode_plus_sent_to_model(self):
        pass
    def test_pretrained_model_lists(self):
        pass
    def test_get_vocab(self):
        pass
    def test_pretokenized_inputs(self):
        pass
    def test_conversion_reversible(self):
        pass
| 21 |
"""simple docstring"""
def reverse_words(input_str: str) -> str:
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
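    # Hedged usage sketch (example string chosen here, not from the source):
    print(reverse_words("I love Python"))  # -> "Python love I"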
| 21 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 21 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
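if __name__ == "__main__":
    # Hedged usage sketch (example values, not from the original file): the
    # default config derives hidden_size = embed_dim * 2 ** (num_stages - 1).
    config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5])
    assert config.hidden_size == 64 * 2**3  # 512
    assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]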
| 21 | 1 |
"""simple docstring"""
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )
    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )
    def to_json_string(self):
        return json.dumps(dataclasses.asdict(self), indent=2)
    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models
    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
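if __name__ == "__main__":
    # Hedged usage sketch (example values, not from the original file): the
    # dataclass can be built directly and serialized for inspection.
    args = BenchmarkArguments(models=["bert-base-cased"], batch_sizes=[8], sequence_lengths=[32])
    print(args.model_names)
    print(args.to_json_string())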
| 21 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def num_embed(self):
        return 12
    @property
    def num_embeds_ada_norm(self):
        return 12
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)
    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = Transformer2DModel(**model_kwargs)
        return model
    def test_vq_diffusion(self):
        device = "cpu"
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)
        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "teddy bear playing in the pool"
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )
        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "teddy bear playing in the pool"
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )
        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
| 21 | 1 |
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()
    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)
    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072
    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336
    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)
    if "large" in model_name:
        config.projection_dim = 768
    return config
def rename_key(name):
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
snake_case_ : str = val[
:dim, :
]
snake_case_ : Dict = val[
dim : dim * 2, :
]
snake_case_ : Union[str, Any] = val[
-dim:, :
]
else:
snake_case_ : Tuple = val[
:dim
]
snake_case_ : int = val[
dim : dim * 2
]
snake_case_ : Any = val[
-dim:
]
else:
if "weight" in key:
snake_case_ : Optional[Any] = val[
:dim, :
]
snake_case_ : List[str] = val[
dim : dim * 2, :
]
snake_case_ : str = val[
-dim:, :
]
else:
snake_case_ : List[Any] = val[:dim]
snake_case_ : List[Any] = val[
dim : dim * 2
]
snake_case_ : Any = val[-dim:]
elif key.startswith("""mit""" ):
snake_case_ : Tuple = key_split[2]
snake_case_ : Any = config.vision_config.mit_hidden_size
if "weight" in key:
snake_case_ : Dict = val[:dim, :]
snake_case_ : str = val[dim : dim * 2, :]
snake_case_ : Optional[int] = val[-dim:, :]
else:
snake_case_ : int = val[:dim]
snake_case_ : Optional[Any] = val[dim : dim * 2]
snake_case_ : int = val[-dim:]
else:
snake_case_ : Any = key_split[2]
snake_case_ : int = config.text_config.hidden_size
if "weight" in key:
snake_case_ : List[str] = val[:dim, :]
snake_case_ : List[str] = val[
dim : dim * 2, :
]
snake_case_ : Tuple = val[-dim:, :]
else:
snake_case_ : Tuple = val[:dim]
snake_case_ : int = val[
dim : dim * 2
]
snake_case_ : Tuple = val[-dim:]
else:
snake_case_ : str = rename_key(__UpperCamelCase )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
snake_case_ : Tuple = val.T
snake_case_ : Any = val
return orig_state_dict
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
if num_frames == 8:
snake_case_ : Optional[int] = """eating_spaghetti_8_frames.npy"""
elif num_frames == 1_6:
snake_case_ : Tuple = """eating_spaghetti.npy"""
    elif num_frames == 3_2:
        snake_case_ : Union[str, Any] = """eating_spaghetti_32_frames.npy"""
    else:
        raise ValueError(F'num_frames should be 8, 16 or 32, got {num_frames}' )
snake_case_ : str = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename=__UpperCamelCase , repo_type="""dataset""" , )
snake_case_ : str = np.load(__UpperCamelCase )
return list(__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : List[str]=None , __UpperCamelCase : Any=False ):
'''simple docstring'''
snake_case_ : int = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
snake_case_ : Optional[int] = model_to_url[model_name]
snake_case_ : str = 8
if "16-frames" in model_name:
snake_case_ : str = 1_6
elif "shot" in model_name:
snake_case_ : Optional[int] = 3_2
snake_case_ : Optional[Any] = get_xclip_config(__UpperCamelCase , __UpperCamelCase )
snake_case_ : List[str] = XCLIPModel(__UpperCamelCase )
model.eval()
if "drive" in checkpoint_url:
snake_case_ : Any = """pytorch_model.bin"""
gdown.cached_download(__UpperCamelCase , __UpperCamelCase , quiet=__UpperCamelCase )
snake_case_ : List[Any] = torch.load(__UpperCamelCase , map_location="""cpu""" )["""model"""]
else:
snake_case_ : List[Any] = torch.hub.load_state_dict_from_url(__UpperCamelCase )["""model"""]
snake_case_ : int = convert_state_dict(__UpperCamelCase , __UpperCamelCase )
snake_case_ : int = XCLIPModel(__UpperCamelCase )
snake_case_ , snake_case_ : int = model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
snake_case_ : Union[str, Any] = 3_3_6 if model_name == """xclip-large-patch14-16-frames""" else 2_2_4
snake_case_ : List[Any] = VideoMAEImageProcessor(size=__UpperCamelCase )
snake_case_ : List[str] = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
snake_case_ : Tuple = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
snake_case_ : List[str] = XCLIPProcessor(image_processor=__UpperCamelCase , tokenizer=__UpperCamelCase )
snake_case_ : List[str] = prepare_video(__UpperCamelCase )
snake_case_ : List[str] = processor(
text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=__UpperCamelCase , return_tensors="""pt""" , padding=__UpperCamelCase )
print("""Shape of pixel values:""" , inputs.pixel_values.shape )
with torch.no_grad():
snake_case_ : str = model(**__UpperCamelCase )
# Verify outputs
snake_case_ : Optional[Any] = outputs.logits_per_video
snake_case_ : List[Any] = logits_per_video.softmax(dim=1 )
print("""Probs:""" , __UpperCamelCase )
# kinetics-400
if model_name == "xclip-base-patch32":
snake_case_ : Optional[Any] = torch.tensor([[0.0_019, 0.9_951, 0.0_030]] )
elif model_name == "xclip-base-patch32-16-frames":
snake_case_ : List[str] = torch.tensor([[7.0_9_9_9E-0_4, 9.9_8_8_3E-0_1, 4.5_5_8_0E-0_4]] )
elif model_name == "xclip-base-patch16":
snake_case_ : int = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] )
elif model_name == "xclip-base-patch16-16-frames":
snake_case_ : Tuple = torch.tensor([[7.6_9_3_7E-0_4, 9.9_7_2_8E-0_1, 1.9_4_7_3E-0_3]] )
elif model_name == "xclip-large-patch14":
snake_case_ : int = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] )
elif model_name == "xclip-large-patch14-16-frames":
snake_case_ : str = torch.tensor([[3.3_8_7_7E-0_4, 9.9_9_3_7E-0_1, 2.8_8_8_8E-0_4]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
snake_case_ : Optional[Any] = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
snake_case_ : Optional[Any] = torch.tensor([[3.8_5_5_4E-0_4, 9.9_9_2_9E-0_1, 3.2_7_5_4E-0_4]] )
elif model_name == "xclip-large-patch14-kinetics-600":
snake_case_ : Dict = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
snake_case_ : List[str] = torch.tensor([[7.1_8_9_0E-0_6, 9.9_9_9_4E-0_1, 5.6_5_5_9E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
snake_case_ : Optional[int] = torch.tensor([[1.0_3_2_0E-0_5, 9.9_9_9_3E-0_1, 6.2_4_3_5E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
snake_case_ : str = torch.tensor([[4.1_3_7_7E-0_6, 9.9_9_9_0E-0_1, 9.8_3_8_6E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
snake_case_ : Optional[Any] = torch.tensor([[4.1_3_4_7E-0_5, 9.9_9_6_2E-0_1, 3.3_4_1_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
snake_case_ : Optional[int] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
snake_case_ : List[str] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
snake_case_ : Tuple = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
snake_case_ : List[Any] = torch.tensor([[9.8_2_1_9E-0_4, 9.9_5_9_3E-0_1, 3.0_8_6_3E-0_3]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
snake_case_ : Union[str, Any] = torch.tensor([[3.5_0_8_2E-0_4, 9.9_7_8_5E-0_1, 1.7_9_6_6E-0_3]] )
else:
raise ValueError(F'Model name {model_name} not supported' )
assert torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__UpperCamelCase )
if push_to_hub:
print("""Pushing model, processor and slow tokenizer files to the hub...""" )
model.push_to_hub(__UpperCamelCase , organization="""nielsr""" )
processor.push_to_hub(__UpperCamelCase , organization="""nielsr""" )
slow_tokenizer.push_to_hub(__UpperCamelCase , organization="""nielsr""" )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''xclip-base-patch32''',
type=str,
help='''Name of the model.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__lowerCAmelCase : List[Any] = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
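# ---------------------------------------------------------------------------
# A minimal sketch of the fused-QKV layout that the slicing in
# convert_state_dict above assumes: the checkpoint stores the attention input
# projection as one (3 * dim, dim) matrix stacked as [query; key; value].
# The toy size is hypothetical; real X-CLIP models use dim = 768 or 1024.
import torch

dim = 4
fused_in_proj = torch.arange(3 * dim * dim, dtype=torch.float32).view(3 * dim, dim)
q_proj = fused_in_proj[:dim, :]  # first dim rows -> query
k_proj = fused_in_proj[dim : dim * 2, :]  # middle dim rows -> key
v_proj = fused_in_proj[-dim:, :]  # last dim rows -> value
assert torch.equal(torch.cat([q_proj, k_proj, v_proj]), fused_in_proj)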
| 21 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Any=0 ):
'''simple docstring'''
if name is None:
snake_case_ : Dict = None
else:
snake_case_ : Dict = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(5_0 - spaces ) + """s}"""
snake_case_ : Any = fmt.format(__UpperCamelCase )
# Print and recurse (if needed).
if isinstance(__UpperCamelCase , __UpperCamelCase ):
if msg is not None:
print(__UpperCamelCase )
for k in val.keys():
recursive_print(__UpperCamelCase , val[k] , spaces + 2 )
elif isinstance(__UpperCamelCase , torch.Tensor ):
print(__UpperCamelCase , """:""" , val.size() )
else:
print(__UpperCamelCase , """:""" , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : Any = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
snake_case_ : List[str] = (num_heads, hidden_size, num_splits) + input_shape[1:]
snake_case_ : Tuple = param.view(*__UpperCamelCase )
snake_case_ : Tuple = param.transpose(0 , 2 )
snake_case_ : Any = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
snake_case_ : Optional[Any] = (num_heads, num_splits, hidden_size) + input_shape[1:]
snake_case_ : str = param.view(*__UpperCamelCase )
snake_case_ : Dict = param.transpose(0 , 1 ).contiguous()
snake_case_ : int = param.view(*__UpperCamelCase )
return param
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Dict = {}
# old versions did not store training args
snake_case_ : List[str] = input_state_dict.get("""args""" , __UpperCamelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
snake_case_ : Tuple = ds_args.padded_vocab_size
snake_case_ : Optional[int] = ds_args.max_position_embeddings
snake_case_ : Union[str, Any] = ds_args.hidden_size
snake_case_ : Union[str, Any] = ds_args.num_layers
snake_case_ : str = ds_args.num_attention_heads
snake_case_ : str = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
snake_case_ : Union[str, Any] = config.n_head
# The hidden_size per head.
snake_case_ : Optional[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
snake_case_ : Optional[Any] = input_state_dict["""checkpoint_version"""]
else:
snake_case_ : int = 0.0
# The model.
snake_case_ : List[str] = input_state_dict["""model"""]
# The language model.
snake_case_ : str = model["""language_model"""]
# The embeddings.
snake_case_ : Tuple = lm["""embedding"""]
# The word embeddings.
snake_case_ : List[str] = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
snake_case_ : Optional[int] = word_embeddings[: config.vocab_size, :]
snake_case_ : Optional[int] = word_embeddings
# The position embeddings.
snake_case_ : List[Any] = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
snake_case_ : Tuple = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match' )
# Store the position embeddings.
snake_case_ : Union[str, Any] = pos_embeddings
# The transformer.
snake_case_ : Optional[Any] = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
snake_case_ : Optional[Any] = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
snake_case_ : List[str] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
snake_case_ : int = layer_re.match(__UpperCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
snake_case_ : Tuple = int(m.group(1 ) )
# The name of the operation.
snake_case_ : Any = m.group(2 )
# Is it a weight or a bias?
snake_case_ : Union[str, Any] = m.group(3 )
# The name of the layer.
snake_case_ : str = F'transformer.h.{layer_idx}'
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
snake_case_ : Dict = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
snake_case_ : Optional[int] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
snake_case_ : Optional[Any] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , __UpperCamelCase , __UpperCamelCase )
snake_case_ : List[Any] = causal_mask
# Insert a "dummy" tensor for masked_bias.
snake_case_ : str = torch.tensor(-1E4 , dtype=torch.floataa )
snake_case_ : List[Any] = masked_bias
snake_case_ : Optional[int] = fix_query_key_value_ordering(__UpperCamelCase , __UpperCamelCase , 3 , __UpperCamelCase , __UpperCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
snake_case_ : str = out_val.transpose(0 , 1 ).contiguous()
# Store.
snake_case_ : Tuple = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
snake_case_ : Optional[Any] = fix_query_key_value_ordering(__UpperCamelCase , __UpperCamelCase , 3 , __UpperCamelCase , __UpperCamelCase )
# Store. No change of shape.
snake_case_ : List[Any] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
snake_case_ : Any = megatron_to_transformers[op_name]
snake_case_ : str = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
snake_case_ : List[str] = megatron_to_transformers[op_name]
snake_case_ : Tuple = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
snake_case_ : Dict = transformer["""final_layernorm.weight"""]
snake_case_ : Dict = transformer["""final_layernorm.bias"""]
    # For the LM head, transformers ties the output matrix to the word embeddings.
snake_case_ : Optional[int] = word_embeddings
# It should be done!
return output_state_dict
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[str] = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" , type=__UpperCamelCase , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
parser.add_argument(
"""--config_file""" , default="""""" , type=__UpperCamelCase , help="""An optional config json file describing the pre-trained model.""" , )
snake_case_ : str = parser.parse_args()
    # Extract the directory that contains the checkpoint.
snake_case_ : Optional[Any] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F'Extracting PyTorch state dictionary from {args.path_to_checkpoint}' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
snake_case_ : Optional[int] = torch.load(__UpperCamelCase , map_location="""cpu""" )
else:
snake_case_ : List[Any] = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
snake_case_ : Any = input_state_dict.get("""args""" , __UpperCamelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
snake_case_ : Any = """gelu_fast"""
elif ds_args.openai_gelu:
snake_case_ : Tuple = """gelu_new"""
else:
snake_case_ : List[str] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
snake_case_ : Dict = """gelu_new"""
# Spell out all parameters in case the defaults change.
        snake_case_ : List[str] = GPTaConfig(
            vocab_size=5_0_2_5_7,
            n_positions=1_0_2_4,
            n_embd=1_0_2_4,
            n_layer=2_4,
            n_head=1_6,
            n_inner=4_0_9_6,
            activation_function=__UpperCamelCase,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1E-5,
            initializer_range=0.02,
            summary_type="""cls_index""",
            summary_use_proj=__UpperCamelCase,
            summary_activation=__UpperCamelCase,
            summary_proj_to_labels=__UpperCamelCase,
            summary_first_dropout=0.1,
            scale_attn_weights=__UpperCamelCase,
            use_cache=__UpperCamelCase,
            bos_token_id=5_0_2_5_6,
            eos_token_id=5_0_2_5_6,
        )
else:
snake_case_ : List[Any] = GPTaConfig.from_json_file(args.config_file )
snake_case_ : int = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
snake_case_ : Tuple = convert_megatron_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(__UpperCamelCase , __UpperCamelCase )
# Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906
if ds_args is not None:
snake_case_ : str = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
snake_case_ : Optional[Any] = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
snake_case_ : str = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'Unrecognized tokenizer_type {tokenizer_type}' )
else:
snake_case_ : List[str] = """gpt2"""
snake_case_ : List[Any] = AutoTokenizer.from_pretrained(__UpperCamelCase )
snake_case_ : List[str] = type(__UpperCamelCase ).__name__
snake_case_ : Optional[int] = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(__UpperCamelCase )
# Save tokenizer based on args
print(F'Adding {tokenizer_class} tokenizer files' )
tokenizer.save_pretrained(__UpperCamelCase )
# Store the state_dict to file.
snake_case_ : List[Any] = os.path.join(__UpperCamelCase , """pytorch_model.bin""" )
print(F'Saving checkpoint to "{output_checkpoint_file}"' )
torch.save(__UpperCamelCase , __UpperCamelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
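# A hedged sketch of what fix_query_key_value_ordering does for
# checkpoint_version >= 2.0: Megatron lays the fused QKV rows out as
# [head][q|k|v][dim_per_head], while transformers expects
# [q|k|v][head][dim_per_head]. The toy sizes below are hypothetical.
num_heads, num_splits, dim_per_head, in_dim = 2, 3, 4, 5
fused = torch.arange(num_heads * num_splits * dim_per_head * in_dim, dtype=torch.float32)
fused = fused.view(num_heads * num_splits * dim_per_head, in_dim)
reordered = (
    fused.view(num_heads, num_splits, dim_per_head, in_dim)
    .transpose(0, 1)
    .contiguous()
    .view(num_heads * num_splits * dim_per_head, in_dim)
)
# After reordering, rows 0..3 are the query rows of head 0 and rows 4..7 the
# query rows of head 1, i.e. all query rows now come first.
assert torch.equal(reordered[4:8], fused[12:16])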
| 21 | 1 |
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCAmelCase : List[Any] = {'''vocab_file''': '''spiece.model'''}
__lowerCAmelCase : Optional[Any] = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
__lowerCAmelCase : Optional[Any] = {
'''AI-Sweden/gpt-sw3-126m''': 2048,
'''AI-Sweden/gpt-sw3-350m''': 2048,
'''AI-Sweden/gpt-sw3-1.6b''': 2048,
'''AI-Sweden/gpt-sw3-6.7b''': 2048,
'''AI-Sweden/gpt-sw3-20b''': 2048,
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , _lowercase , _lowercase=False , _lowercase=False , _lowercase=False , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase = None , **_lowercase , ) -> None:
'''simple docstring'''
snake_case_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
snake_case_ : Optional[int] = kwargs.get("""name_or_path""" )
if name_or_path is None:
            logger.warning(
                """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b;"""
                """ if you are testing the model, this can safely be ignored""" )
snake_case_ : Optional[int] = """None"""
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
snake_case_ : Tuple = """<|endoftext|>""" if eos_token is None else eos_token
snake_case_ : str = """<unk>""" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
snake_case_ : str = unk_token if pad_token is None else pad_token
snake_case_ : Union[str, Any] = eos_token if bos_token is None else bos_token
else:
snake_case_ : Tuple = """<pad>""" if pad_token is None else pad_token
snake_case_ : Tuple = """<s>""" if bos_token is None else bos_token
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , sp_model_kwargs=self.sp_model_kwargs , **_lowercase , )
snake_case_ : List[str] = do_lower_case
snake_case_ : int = remove_space
snake_case_ : str = keep_accents
snake_case_ : List[str] = vocab_file
snake_case_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowercase )
# Used for whitespace normalization in input texts
        # fmt: off
snake_case_ : int = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
snake_case_ : Dict = re.compile(
f'[{"".join(map(_lowercase , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' )
def __getstate__( self ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.__dict__.copy()
snake_case_ : List[str] = None
return state
def __setstate__( self , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ : int = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case_ : Any = {}
snake_case_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return len(self.sp_model )
def UpperCAmelCase__ ( self , _lowercase ) -> str:
'''simple docstring'''
snake_case_ : str = self.non_printing_characters_re.sub("""""" , _lowercase )
# Normalize whitespaces
snake_case_ : Any = """""".join([char if char not in self.whitespaces else """ """ for char in text] )
# NFC Unicode normalization
snake_case_ : Optional[int] = unicodedata.normalize("""NFC""" , _lowercase )
return text
def UpperCAmelCase__ ( self , _lowercase , **_lowercase ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.preprocess_text(_lowercase )
return self.sp_model.encode(_lowercase , out_type=_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> int:
'''simple docstring'''
return self.sp_model.PieceToId(_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> str:
'''simple docstring'''
return self.sp_model.IdToPiece(_lowercase )
@staticmethod
def UpperCAmelCase__ ( _lowercase ) -> str:
'''simple docstring'''
return out_string
def UpperCAmelCase__ ( self , _lowercase ) -> str:
'''simple docstring'''
snake_case_ : str = []
snake_case_ : str = """"""
snake_case_ : Any = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_lowercase ) + token
snake_case_ : List[Any] = True
snake_case_ : str = []
else:
current_sub_tokens.append(_lowercase )
snake_case_ : List[Any] = False
out_string += self.sp_model.decode(_lowercase )
return out_string
def UpperCAmelCase__ ( self ) -> Dict[str, int]:
'''simple docstring'''
snake_case_ : Dict = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
snake_case_ : List[Any] = os.path.join(
_lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowercase , """wb""" ) as fi:
snake_case_ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(_lowercase )
return (out_vocab_file,)
def UpperCAmelCase__ ( self , _lowercase , _lowercase = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
'''simple docstring'''
if isinstance(_lowercase , _lowercase ):
snake_case_ : Optional[int] = self.preprocess_text(_lowercase )
snake_case_ : int = self.sp_model.encode(_lowercase )
else:
snake_case_ : str = [self.preprocess_text(_lowercase ) for t in text]
snake_case_ : List[Any] = self.sp_model.encode(_lowercase )
if return_tensors is True or return_tensors == "pt":
snake_case_ : List[str] = torch.tensor(_lowercase )
return token_ids
def UpperCAmelCase__ ( self , _lowercase ) -> str:
'''simple docstring'''
return self.sp_model.decode(_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> List[int]:
'''simple docstring'''
snake_case_ : Optional[int] = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
snake_case_ : Dict = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(_lowercase ) + f'{self.bos_token}Bot:'
)
return self.encode(text=_lowercase )
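# ---------------------------------------------------------------------------
# A hedged sketch of the prompt layout the conversation method above
# produces, reproduced with plain string operations (the turns are made up):
eos_token, bos_token = "<|endoftext|>", "<s>"
turns = [(True, "Hej"), (False, "Hej!"), (True, "Allt bra?")]
prompts = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in turns]
prompt = f'{eos_token}{bos_token}' + f'{bos_token}'.join(prompts) + f'{bos_token}Bot:'
assert prompt == "<|endoftext|><s>User: Hej<s>Bot: Hej!<s>User: Allt bra?<s>Bot:"
# The trailing "Bot:" primes generation to continue as the assistant.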
| 21 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : int = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : int = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : int = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : List[Any] = tf.cast(math.pi , x.dtype )
snake_case_ : int = tf.cast(0.044_715 , x.dtype )
snake_case_ : Optional[int] = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(__UpperCamelCase , 3 )) ))
return x * cdf
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : Optional[Any] = tf.convert_to_tensor(__UpperCamelCase )
return x * tf.tanh(tf.math.softplus(__UpperCamelCase ) )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : int = tf.cast(0.044_715 , x.dtype )
snake_case_ : Optional[int] = tf.cast(0.7_978_845_608 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : Tuple = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : str = tf.cast(1.702 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return tf.clip_by_value(_gelu(__UpperCamelCase ) , -1_0 , 1_0 )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str]=-1 ):
'''simple docstring'''
snake_case_ , snake_case_ : List[Any] = tf.split(__UpperCamelCase , 2 , axis=__UpperCamelCase )
return a * tf.math.sigmoid(__UpperCamelCase )
if version.parse(tf.version.VERSION) >= version.parse('''2.4'''):
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
return tf.keras.activations.gelu(__UpperCamelCase , approximate=__UpperCamelCase )
__lowerCAmelCase : int = tf.keras.activations.gelu
__lowerCAmelCase : Optional[Any] = approximate_gelu_wrap
else:
__lowerCAmelCase : List[Any] = _gelu
__lowerCAmelCase : Any = _gelu_new
__lowerCAmelCase : Dict = {
'''gelu''': gelu,
'''gelu_10''': gelu_aa,
'''gelu_fast''': gelu_fast,
'''gelu_new''': gelu_new,
'''glu''': glu,
'''mish''': mish,
'''quick_gelu''': quick_gelu,
'''relu''': tf.keras.activations.relu,
'''sigmoid''': tf.keras.activations.sigmoid,
'''silu''': tf.keras.activations.swish,
'''swish''': tf.keras.activations.swish,
'''tanh''': tf.keras.activations.tanh,
}
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F'function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}' )
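# ---------------------------------------------------------------------------
# A hedged numerical check of why the table above can carry both an exact
# and a tanh-approximate GELU: on a moderate range the two agree to within
# roughly 1e-3. The sample points are arbitrary.
x = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0])
exact = 0.5 * x * (1.0 + tf.math.erf(x / tf.sqrt(2.0)))
approx = 0.5 * x * (1.0 + tf.tanh(tf.sqrt(2.0 / math.pi) * (x + 0.044_715 * tf.pow(x, 3))))
assert float(tf.reduce_max(tf.abs(exact - approx))) < 1E-3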
| 21 | 1 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
__lowerCAmelCase : List[Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class _lowerCAmelCase ( datasets.BuilderConfig ):
"""simple docstring"""
_lowerCamelCase = 10_000
_lowerCamelCase = None
_lowerCamelCase = None
class _lowerCAmelCase ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
_lowerCamelCase = ParquetConfig
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def UpperCAmelCase__ ( self , _lowercase ) -> int:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
snake_case_ : str = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_lowercase , (str, list, tuple) ):
snake_case_ : Union[str, Any] = data_files
if isinstance(_lowercase , _lowercase ):
snake_case_ : Any = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
snake_case_ : str = [dl_manager.iter_files(_lowercase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
snake_case_ : Union[str, Any] = []
for split_name, files in data_files.items():
if isinstance(_lowercase , _lowercase ):
snake_case_ : Optional[int] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
snake_case_ : List[str] = [dl_manager.iter_files(_lowercase ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(_lowercase ):
with open(_lowercase , """rb""" ) as f:
snake_case_ : List[str] = datasets.Features.from_arrow_schema(pq.read_schema(_lowercase ) )
break
splits.append(datasets.SplitGenerator(name=_lowercase , gen_kwargs={"""files""": files} ) )
return splits
def UpperCAmelCase__ ( self , _lowercase ) -> pa.Table:
'''simple docstring'''
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
snake_case_ : Dict = table_cast(_lowercase , self.info.features.arrow_schema )
return pa_table
def UpperCAmelCase__ ( self , _lowercase ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' )
for file_idx, file in enumerate(itertools.chain.from_iterable(_lowercase ) ):
with open(_lowercase , """rb""" ) as f:
snake_case_ : int = pq.ParquetFile(_lowercase )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
snake_case_ : Tuple = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f'{file_idx}_{batch_idx}', self._cast_table(_lowercase )
except ValueError as e:
logger.error(f'Failed to read file \'{file}\' with error {type(_lowercase )}: {e}' )
raise
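# ---------------------------------------------------------------------------
# A hedged usage sketch of the batched reading pattern the builder above
# relies on: streaming record batches keeps memory bounded for large shards.
# The temporary path and column name are made up.
if __name__ == "__main__":
    import os
    import tempfile

    path = os.path.join(tempfile.mkdtemp(), "example.parquet")
    pq.write_table(pa.table({"text": ["a", "b", "c"]}), path)
    for batch in pq.ParquetFile(path).iter_batches(batch_size=2, columns=["text"]):
        # Streams 2 rows, then 1 row, without loading the whole file at once
        print(pa.Table.from_batches([batch]).num_rows)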
| 21 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : int = [0] * len(__UpperCamelCase )
snake_case_ : List[str] = []
snake_case_ : Any = [1] * len(__UpperCamelCase )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__UpperCamelCase ) ):
if indegree[i] == 0:
queue.append(__UpperCamelCase )
while queue:
snake_case_ : Optional[int] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
snake_case_ : Union[str, Any] = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(__UpperCamelCase )
print(max(__UpperCamelCase ) )
# Adjacency list of Graph
__lowerCAmelCase : str = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
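# ---------------------------------------------------------------------------
# A brute-force cross-check (hypothetical helper, not part of the algorithm
# above), reusing the adjacency list defined above: count vertices on the
# longest path by exhaustive DFS. Every source-to-sink route here touches at
# most five vertices (e.g. 0 -> 2 -> 5 -> 6 -> 7), so longest_distance
# prints 5. Note that distances count vertices, not edges, because long_dist
# starts at 1.
def brute_force_longest(dag: dict) -> int:
    def dfs(vertex: int) -> int:
        # 1 for the current vertex plus the best continuation, if any
        return 1 + max((dfs(neighbour) for neighbour in dag[vertex]), default=0)

    return max(dfs(vertex) for vertex in dag)

assert brute_force_longest(graph) == 5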
| 21 | 1 |
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , _lowercase="</s>" , _lowercase="<unk>" , _lowercase="<pad>" , _lowercase=1_2_5 , _lowercase=None , **_lowercase , ) -> None:
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
snake_case_ : Dict = [f'<extra_id_{i}>' for i in range(_lowercase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
snake_case_ : Optional[Any] = len(set(filter(lambda _lowercase : bool("""extra_id""" in str(_lowercase ) ) , _lowercase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
""" provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"""
""" extra_ids tokens""" )
snake_case_ : List[Any] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else pad_token
snake_case_ : Dict = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else eos_token
snake_case_ : str = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else unk_token
super().__init__(
eos_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , extra_ids=_lowercase , additional_special_tokens=_lowercase , **_lowercase , )
snake_case_ : List[str] = extra_ids
snake_case_ : Tuple = 2**8 # utf is 8 bits
# define special tokens dict
snake_case_ : Dict[int, str] = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
snake_case_ : int = len(self.special_tokens_encoder )
snake_case_ : Tuple = len(_lowercase )
for i, token in enumerate(_lowercase ):
snake_case_ : List[str] = self.vocab_size + i - n
snake_case_ : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None , _lowercase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(_lowercase )) + [1]
return ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1]
def UpperCAmelCase__ ( self , _lowercase ) -> List[int]:
'''simple docstring'''
if len(_lowercase ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
""" eos tokens being added.""" )
return token_ids
else:
return token_ids + [self.eos_token_id]
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]:
'''simple docstring'''
snake_case_ : Optional[int] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]:
'''simple docstring'''
snake_case_ : List[str] = self._add_eos_if_not_present(_lowercase )
if token_ids_a is None:
return token_ids_a
else:
snake_case_ : List[Any] = self._add_eos_if_not_present(_lowercase )
return token_ids_a + token_ids_a
def UpperCAmelCase__ ( self , _lowercase ) -> List[str]:
'''simple docstring'''
        snake_case_ : int = [chr(i ) for i in text.encode("""utf-8""" )]
return tokens
def UpperCAmelCase__ ( self , _lowercase ) -> int:
'''simple docstring'''
if token in self.special_tokens_encoder:
snake_case_ : Tuple = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
snake_case_ : List[Any] = self.added_tokens_encoder[token]
elif len(_lowercase ) != 1:
snake_case_ : List[Any] = self.unk_token_id
else:
snake_case_ : List[Any] = ord(_lowercase ) + self._num_special_tokens
return token_id
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
if index in self.special_tokens_decoder:
snake_case_ : Dict = self.special_tokens_decoder[index]
else:
snake_case_ : Any = chr(index - self._num_special_tokens )
return token
def UpperCAmelCase__ ( self , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = b""""""
for token in tokens:
if token in self.special_tokens_decoder:
snake_case_ : List[Any] = self.special_tokens_decoder[token].encode("""utf-8""" )
elif token in self.added_tokens_decoder:
snake_case_ : Optional[Any] = self.special_tokens_decoder[token].encode("""utf-8""" )
elif token in self.special_tokens_encoder:
snake_case_ : Optional[Any] = token.encode("""utf-8""" )
elif token in self.added_tokens_encoder:
snake_case_ : Optional[int] = token.encode("""utf-8""" )
else:
snake_case_ : Optional[Any] = bytes([ord(_lowercase )] )
bstring += tok_string
snake_case_ : List[Any] = bstring.decode("""utf-8""" , errors="""ignore""" )
return string
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> Tuple[str]:
'''simple docstring'''
return ()
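# ---------------------------------------------------------------------------
# A hedged sketch of the byte-level mapping implemented above, with no model
# download: every UTF-8 byte becomes one token, and ids are shifted by the
# three special tokens (<pad>=0, </s>=1, <unk>=2).
text = "hé"
byte_tokens = [chr(b) for b in text.encode("utf-8")]  # 'h' is one byte, 'é' is two
ids = [ord(token) + 3 for token in byte_tokens]  # shift past pad/eos/unk
assert bytes(i - 3 for i in ids).decode("utf-8") == text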
| 21 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
    snake_case_ : Any = 1  # To keep the calculated value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
snake_case_ : Optional[int] = n - k
# Calculate C(n,k)
for i in range(__UpperCamelCase ):
result *= n - i
result //= i + 1
return result
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return binomial_coefficient(2 * node_count , __UpperCamelCase ) // (node_count + 1)
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
if n < 0:
raise ValueError("""factorial() not defined for negative values""" )
snake_case_ : Optional[int] = 1
for i in range(1 , n + 1 ):
result *= i
return result
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return catalan_number(__UpperCamelCase ) * factorial(__UpperCamelCase )
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] = int(input('''Enter the number of nodes: ''').strip() or 0)
if node_count <= 0:
raise ValueError('''We need some nodes to work with.''')
print(
F'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
F'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
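# ---------------------------------------------------------------------------
# Worked example (illustrative): for 3 nodes,
# binomial_coefficient(6, 3) = 20, so catalan_number(3) = 20 // 4 = 5
# distinct binary search trees, and binary_tree_count(3) = 5 * 3! = 30
# labelled binary trees.
assert catalan_number(3) == 5
assert binary_tree_count(3) == 30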
| 21 | 1 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
__lowerCAmelCase : Any = random.Random()
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any]=1.0 , __UpperCamelCase : str=None , __UpperCamelCase : Tuple=None ):
'''simple docstring'''
if rng is None:
snake_case_ : Union[str, Any] = global_rng
snake_case_ : str = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=7 , _lowercase=4_0_0 , _lowercase=2_0_0_0 , _lowercase=1_0 , _lowercase=1_6_0 , _lowercase=8 , _lowercase=0.0 , _lowercase=4_0_0_0 , _lowercase=False , _lowercase=True , ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = parent
snake_case_ : Optional[Any] = batch_size
snake_case_ : Optional[int] = min_seq_length
snake_case_ : Optional[Any] = max_seq_length
snake_case_ : int = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
snake_case_ : str = padding_value
snake_case_ : Tuple = sampling_rate
snake_case_ : List[str] = return_attention_mask
snake_case_ : Optional[Any] = do_normalize
snake_case_ : List[Any] = feature_size
snake_case_ : List[Any] = chunk_length
snake_case_ : int = hop_length
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCAmelCase__ ( self , _lowercase=False , _lowercase=False ) -> List[str]:
'''simple docstring'''
def _flatten(_lowercase ):
return list(itertools.chain(*_lowercase ) )
if equal_length:
snake_case_ : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
snake_case_ : Dict = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
snake_case_ : List[Any] = [np.asarray(_lowercase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = WhisperFeatureExtractor if is_speech_available() else None
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Any = WhisperFeatureExtractionTester(self )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : Any = feat_extract_first.save_pretrained(_lowercase )[0]
check_json_file_has_correct_format(_lowercase )
snake_case_ : Dict = self.feature_extraction_class.from_pretrained(_lowercase )
snake_case_ : List[Any] = feat_extract_first.to_dict()
snake_case_ : Tuple = feat_extract_second.to_dict()
snake_case_ : Tuple = feat_extract_first.mel_filters
snake_case_ : Dict = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_lowercase , _lowercase ) )
self.assertEqual(_lowercase , _lowercase )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : List[str] = os.path.join(_lowercase , """feat_extract.json""" )
feat_extract_first.to_json_file(_lowercase )
snake_case_ : Tuple = self.feature_extraction_class.from_json_file(_lowercase )
snake_case_ : Dict = feat_extract_first.to_dict()
snake_case_ : str = feat_extract_second.to_dict()
snake_case_ : Any = feat_extract_first.mel_filters
snake_case_ : Tuple = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_lowercase , _lowercase ) )
self.assertEqual(_lowercase , _lowercase )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
snake_case_ : Optional[Any] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
snake_case_ : Union[str, Any] = [np.asarray(_lowercase ) for speech_input in speech_inputs]
# Test feature size
snake_case_ : List[str] = feature_extractor(_lowercase , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
snake_case_ : List[str] = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
snake_case_ : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(_lowercase , _lowercase , atol=1E-3 ) )
# Test batched
snake_case_ : int = feature_extractor(_lowercase , return_tensors="""np""" ).input_features
snake_case_ : Optional[int] = feature_extractor(_lowercase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_lowercase , _lowercase ):
self.assertTrue(np.allclose(_lowercase , _lowercase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
snake_case_ : List[str] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
snake_case_ : Tuple = np.asarray(_lowercase )
snake_case_ : Dict = feature_extractor(_lowercase , return_tensors="""np""" ).input_features
snake_case_ : str = feature_extractor(_lowercase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_lowercase , _lowercase ):
self.assertTrue(np.allclose(_lowercase , _lowercase , atol=1E-3 ) )
# Test truncation required
snake_case_ : List[Any] = [floats_list((1, x) )[0] for x in range(2_0_0 , (feature_extractor.n_samples + 5_0_0) , 2_0_0 )]
snake_case_ : str = [np.asarray(_lowercase ) for speech_input in speech_inputs]
snake_case_ : List[Any] = [x[: feature_extractor.n_samples] for x in speech_inputs]
snake_case_ : List[Any] = [np.asarray(_lowercase ) for speech_input in speech_inputs_truncated]
snake_case_ : Optional[Any] = feature_extractor(_lowercase , return_tensors="""np""" ).input_features
snake_case_ : str = feature_extractor(_lowercase , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_lowercase , _lowercase ):
self.assertTrue(np.allclose(_lowercase , _lowercase , atol=1E-3 ) )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
import torch
snake_case_ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ : Tuple = np.random.rand(1_0_0 , 3_2 ).astype(np.floataa )
snake_case_ : Optional[int] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
snake_case_ : Dict = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
snake_case_ : Any = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def UpperCAmelCase__ ( self , _lowercase ) -> Dict:
'''simple docstring'''
snake_case_ : str = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
snake_case_ : Optional[int] = ds.sort("""id""" ).select(range(_lowercase ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
        # fmt: off
        snake_case_ : List[str] = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
snake_case_ : Tuple = self._load_datasamples(1 )
snake_case_ : Dict = WhisperFeatureExtractor()
snake_case_ : List[str] = feature_extractor(_lowercase , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 8_0, 3_0_0_0) )
self.assertTrue(torch.allclose(input_features[0, 0, :3_0] , _lowercase , atol=1E-4 ) )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case_ : Dict = self._load_datasamples(1 )[0]
snake_case_ : Dict = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5_5_3_5 # Rescale to [0, 65535] to show issue
snake_case_ : int = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=_lowercase )[0]
self.assertTrue(np.all(np.mean(_lowercase ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(_lowercase ) - 1 ) < 1E-3 ) )
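# ---------------------------------------------------------------------------
# A minimal NumPy sketch of the zero-mean unit-variance normalization the
# last test above exercises; the epsilon is illustrative.
audio = np.random.rand(16_000).astype(np.float32)
normed = (audio - audio.mean()) / np.sqrt(audio.var() + 1E-7)
assert abs(float(normed.mean())) < 1E-3 and abs(float(normed.var()) - 1) < 1E-3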
| 21 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : Dict = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''swin'''
_lowerCamelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _lowercase=2_2_4 , _lowercase=4 , _lowercase=3 , _lowercase=9_6 , _lowercase=[2, 2, 6, 2] , _lowercase=[3, 6, 1_2, 2_4] , _lowercase=7 , _lowercase=4.0 , _lowercase=True , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.1 , _lowercase="gelu" , _lowercase=False , _lowercase=0.02 , _lowercase=1E-5 , _lowercase=3_2 , _lowercase=None , _lowercase=None , **_lowercase , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : str = image_size
snake_case_ : int = patch_size
snake_case_ : Optional[int] = num_channels
snake_case_ : Union[str, Any] = embed_dim
snake_case_ : Optional[int] = depths
snake_case_ : Optional[int] = len(_lowercase )
snake_case_ : Optional[Any] = num_heads
snake_case_ : Optional[Any] = window_size
snake_case_ : Optional[Any] = mlp_ratio
snake_case_ : Optional[Any] = qkv_bias
snake_case_ : Optional[Any] = hidden_dropout_prob
snake_case_ : Tuple = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = drop_path_rate
snake_case_ : List[Any] = hidden_act
snake_case_ : str = use_absolute_embeddings
snake_case_ : str = layer_norm_eps
snake_case_ : Optional[Any] = initializer_range
snake_case_ : Any = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case_ : Tuple = int(embed_dim * 2 ** (len(_lowercase ) - 1) )
snake_case_ : Tuple = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(_lowercase ) + 1 )]
snake_case_ , snake_case_ : Any = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = version.parse('''1.11''' )
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCAmelCase__ ( self ) -> float:
'''simple docstring'''
return 1E-4
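
# --- Illustrative sketch (added for clarity, not part of the original file). The config
# above derives the final channel dimension as embed_dim * 2 ** (num_stages - 1), since
# Swin doubles the channel count at each patch-merging step. With the default values:
embed_dim, depths = 96, [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
assert hidden_size == 768  # the hidden size exposed to VisionEncoderDecoderModel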
| 21 | 1 |
"""simple docstring"""
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowercase = "" , _lowercase = False ) -> None:
'''simple docstring'''
snake_case_ : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
snake_case_ : int = is_leaf
snake_case_ : Union[str, Any] = prefix
def UpperCAmelCase__ ( self , _lowercase ) -> tuple[str, str, str]:
'''simple docstring'''
snake_case_ : Optional[int] = 0
for q, w in zip(self.prefix , _lowercase ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def UpperCAmelCase__ ( self , _lowercase ) -> None:
'''simple docstring'''
for word in words:
self.insert(_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> None:
'''simple docstring'''
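        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf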
if self.prefix == word:
snake_case_ : Dict = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
snake_case_ : Optional[int] = RadixNode(prefix=_lowercase , is_leaf=_lowercase )
else:
snake_case_ : List[Any] = self.nodes[word[0]]
snake_case_ , snake_case_ , snake_case_ : Any = incoming_node.match(
_lowercase )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(_lowercase )
            # Case 4: The word is greater than or equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
snake_case_ : Any = remaining_prefix
snake_case_ : int = self.nodes[matching_string[0]]
snake_case_ : int = RadixNode(_lowercase , _lowercase )
snake_case_ : List[Any] = aux_node
if remaining_word == "":
snake_case_ : Optional[Any] = True
else:
self.nodes[matching_string[0]].insert(_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> bool:
'''simple docstring'''
snake_case_ : List[str] = self.nodes.get(word[0] , _lowercase )
if not incoming_node:
return False
else:
snake_case_ , snake_case_ , snake_case_ : Optional[int] = incoming_node.match(
_lowercase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> bool:
'''simple docstring'''
snake_case_ : List[str] = self.nodes.get(word[0] , _lowercase )
if not incoming_node:
return False
else:
snake_case_ , snake_case_ , snake_case_ : Optional[int] = incoming_node.match(
_lowercase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(_lowercase )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
snake_case_ : List[str] = list(self.nodes.values() )[0]
snake_case_ : Dict = merging_node.is_leaf
self.prefix += merging_node.prefix
snake_case_ : List[Any] = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
snake_case_ : int = False
# If there is 1 edge, we merge it with its child
else:
snake_case_ : Tuple = list(incoming_node.nodes.values() )[0]
snake_case_ : Optional[int] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
snake_case_ : Union[str, Any] = merging_node.nodes
return True
def UpperCAmelCase__ ( self , _lowercase = 0 ) -> None:
'''simple docstring'''
if self.prefix != "":
print("""-""" * height , self.prefix , """ (leaf)""" if self.is_leaf else """""" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : int = """banana bananas bandana band apple all beast""".split()
snake_case_ : List[str] = RadixNode()
root.insert_many(__UpperCamelCase )
assert all(root.find(__UpperCamelCase ) for word in words )
assert not root.find("""bandanas""" )
assert not root.find("""apps""" )
root.delete("""all""" )
assert not root.find("""all""" )
root.delete("""banana""" )
assert not root.find("""banana""" )
assert root.find("""bananas""" )
return True
def __lowerCAmelCase ( ):
'''simple docstring'''
assert test_trie()
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Union[str, Any] = RadixNode()
snake_case_ : Any = """banana bananas bandanas bandana band apple all beast""".split()
root.insert_many(__UpperCamelCase )
print("""Words:""" , __UpperCamelCase )
print("""Tree:""" )
root.print_tree()
if __name__ == "__main__":
main()
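
# --- Illustrative sketch (added for clarity, not part of the original file). The core of
# the tree above is the prefix split done by match(): walk two strings in lockstep and
# return (common prefix, leftover prefix, leftover word). Standalone, with readable
# names of my own choosing:
def split_common_prefix(prefix: str, word: str) -> tuple[str, str, str]:
    i = 0
    while i < min(len(prefix), len(word)) and prefix[i] == word[i]:
        i += 1
    return prefix[:i], prefix[i:], word[i:]

assert split_common_prefix("banana", "bandana") == ("ban", "ana", "dana")  # Case 4 above
assert split_common_prefix("band", "bandana") == ("band", "", "ana")  # Case 3 above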
| 21 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : int = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" )
snake_case_ : List[Any] = AutoTokenizer.from_pretrained("""google/mt5-small""" )
snake_case_ : Dict = tokenizer("""Hello there""" , return_tensors="""np""" ).input_ids
snake_case_ : List[str] = tokenizer("""Hi I am""" , return_tensors="""np""" ).input_ids
snake_case_ : Optional[Any] = shift_tokens_right(_lowercase , model.config.pad_token_id , model.config.decoder_start_token_id )
snake_case_ : Tuple = model(_lowercase , decoder_input_ids=_lowercase ).logits
snake_case_ : Tuple = optax.softmax_cross_entropy(_lowercase , onehot(_lowercase , logits.shape[-1] ) ).mean()
snake_case_ : List[str] = -(labels.shape[-1] * loss.item())
snake_case_ : Optional[int] = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
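
# --- Illustrative sketch (added for clarity, not part of the original file). The score
# above is the negative total log-likelihood: mean per-token softmax cross-entropy,
# scaled by sequence length and negated. A plain NumPy version of that bookkeeping
# on toy logits (names are my own):
import numpy as np

def mean_token_cross_entropy(logits: np.ndarray, labels: np.ndarray) -> float:
    # log-softmax over the vocab axis, then gather the log-probability of each label
    shifted = logits - logits.max(axis=-1, keepdims=True)
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=-1, keepdims=True))
    return float(-log_probs[np.arange(labels.size), labels].mean())

toy_logits = np.array([[2.0, 0.5, -1.0], [0.1, 3.0, 0.3]])  # (seq_len=2, vocab=3)
toy_labels = np.array([0, 1])
loss = mean_token_cross_entropy(toy_logits, toy_labels)
score = -(toy_labels.shape[-1] * loss)  # same formula as the test above
assert score < 0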
| 21 | 1 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
__lowerCAmelCase : Optional[Any] = pd.read_csv(
'''https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'''
'''position_salaries.csv'''
)
__lowerCAmelCase : str = dataset.iloc[:, 1:2].values
__lowerCAmelCase : Optional[int] = dataset.iloc[:, 2].values
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = train_test_split(X, y, test_size=0.2, random_state=0)
__lowerCAmelCase : Dict = PolynomialFeatures(degree=4)
__lowerCAmelCase : int = poly_reg.fit_transform(X)
__lowerCAmelCase : List[Any] = LinearRegression()
pol_reg.fit(X_poly, y)
def __lowerCAmelCase ( ):
'''simple docstring'''
plt.scatter(__UpperCamelCase , __UpperCamelCase , color="""red""" )
plt.plot(__UpperCamelCase , pol_reg.predict(poly_reg.fit_transform(__UpperCamelCase ) ) , color="""blue""" )
plt.title("""Truth or Bluff (Linear Regression)""" )
plt.xlabel("""Position level""" )
plt.ylabel("""Salary""" )
plt.show()
if __name__ == "__main__":
viz_polymonial()
    # Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
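
# --- Illustrative sketch (added for clarity, not part of the original file).
# PolynomialFeatures(degree=4) above expands each position level x into the row
# [1, x, x**2, x**3, x**4], so the "polynomial" fit is still a linear regression over
# expanded features. For example, for x = 2:
import numpy as np
from sklearn.preprocessing import PolynomialFeatures

expanded = PolynomialFeatures(degree=4).fit_transform(np.array([[2.0]]))
assert np.allclose(expanded, [[1.0, 2.0, 4.0, 8.0, 16.0]])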
| 21 |
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 21 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ , snake_case_ : str = analyze_text(__UpperCamelCase )
snake_case_ : Any = list(""" """ + ascii_lowercase )
# what is our total sum of probabilities.
snake_case_ : str = sum(single_char_strings.values() )
# one length string
snake_case_ : Tuple = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
snake_case_ : str = single_char_strings[ch]
snake_case_ : List[Any] = my_str / all_sum
my_fir_sum += prob * math.loga(__UpperCamelCase ) # entropy formula.
# print entropy
print(F'{round(-1 * my_fir_sum ):.1f}' )
# two len string
snake_case_ : str = sum(two_char_strings.values() )
snake_case_ : int = 0
# for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            snake_case_ : str = cha + chb
if sequence in two_char_strings:
snake_case_ : Any = two_char_strings[sequence]
snake_case_ : Optional[int] = int(__UpperCamelCase ) / all_sum
my_sec_sum += prob * math.loga(__UpperCamelCase )
# print second entropy
print(F'{round(-1 * my_sec_sum ):.1f}' )
# print the difference between them
print(F'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' )
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : str = Counter() # type: ignore
snake_case_ : int = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(__UpperCamelCase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __lowerCAmelCase ( ):
'''simple docstring'''
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
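
# --- Illustrative sketch (added for clarity, not part of the original file). The first
# loop above computes Shannon entropy, H = -sum(p * log2(p)), over the single-character
# distribution. A compact worked version with names of my own choosing:
import math
from collections import Counter

def shannon_entropy(text: str) -> float:
    counts = Counter(text)
    total = sum(counts.values())
    return -sum((c / total) * math.log2(c / total) for c in counts.values())

assert shannon_entropy("aaaa") == 0.0  # a single symbol carries no information
assert abs(shannon_entropy("abab") - 1.0) < 1e-9  # two equiprobable symbols -> 1 bit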
| 21 |
"""simple docstring"""
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__lowerCAmelCase : List[str] = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
__lowerCAmelCase : Optional[Any] = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
__lowerCAmelCase : str = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Any ):
'''simple docstring'''
return float((preds == labels).mean() )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : str="binary" ):
'''simple docstring'''
snake_case_ : Optional[Any] = simple_accuracy(__UpperCamelCase , __UpperCamelCase )
snake_case_ : Dict = float(fa_score(y_true=__UpperCamelCase , y_pred=__UpperCamelCase , average=__UpperCamelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : List[Any] = {}
for id_pred, label in zip(__UpperCamelCase , __UpperCamelCase ):
snake_case_ : Optional[int] = F'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
snake_case_ : Union[str, Any] = id_pred["""prediction"""]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
snake_case_ : str = [(pred, label)]
snake_case_ , snake_case_ : List[str] = [], []
for question, preds_labels in question_map.items():
snake_case_ , snake_case_ : Optional[Any] = zip(*__UpperCamelCase )
snake_case_ : int = fa_score(y_true=__UpperCamelCase , y_pred=__UpperCamelCase , average="""macro""" )
fas.append(__UpperCamelCase )
snake_case_ : Dict = int(sum(pred == label for pred, label in preds_labels ) == len(__UpperCamelCase ) )
ems.append(__UpperCamelCase )
snake_case_ : Optional[int] = float(sum(__UpperCamelCase ) / len(__UpperCamelCase ) )
snake_case_ : Any = sum(__UpperCamelCase ) / len(__UpperCamelCase )
snake_case_ : int = float(fa_score(y_true=__UpperCamelCase , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> List[str]:
'''simple docstring'''
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_lowercase , _lowercase )}
elif self.config_name == "cb":
return acc_and_fa(_lowercase , _lowercase , fa_avg="""macro""" )
elif self.config_name == "record":
snake_case_ : Optional[Any] = [
{
"""qas""": [
{"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
for ref in references
]
}
]
snake_case_ : Dict = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
return evaluate_record(_lowercase , _lowercase )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_lowercase , _lowercase )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_lowercase , _lowercase )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 21 | 1 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[int] = """https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"""
snake_case_ : Optional[Any] = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw ).convert("""RGB""" )
return image
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Tuple = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.weight', F'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.bias', F'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.weight', F'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.bias', F'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.qkv.weight', F'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.weight', F'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.bias', F'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.weight', F'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.bias', F'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.weight', F'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.bias', F'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.embeddings.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.embeddings.layernorm.bias""") )
# fmt: on
return rename_keys
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = dct.pop(__UpperCamelCase )
snake_case_ : Optional[int] = val
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : str ):
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
snake_case_ : Union[str, Any] = state_dict.pop(F'visual_encoder.blocks.{i}.attn.q_bias' )
snake_case_ : Optional[Any] = state_dict.pop(F'visual_encoder.blocks.{i}.attn.v_bias' )
# next, set bias in the state dict
snake_case_ : Union[str, Any] = torch.cat((q_bias, torch.zeros_like(__UpperCamelCase , requires_grad=__UpperCamelCase ), v_bias) )
snake_case_ : Optional[Any] = qkv_bias
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Dict = 3_6_4 if """coco""" in model_name else 2_2_4
snake_case_ : str = InstructBlipVisionConfig(image_size=__UpperCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
snake_case_ : Optional[int] = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
snake_case_ : int = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
snake_case_ : int = LlamaConfig.from_pretrained("""decapoda-research/llama-7b-hf""" , vocab_size=3_2_0_0_1 ).to_dict()
elif "vicuna-13b" in model_name:
snake_case_ : Union[str, Any] = LlamaConfig.from_pretrained("""decapoda-research/llama-13b-hf""" , vocab_size=3_2_0_0_1 ).to_dict()
else:
raise ValueError("""Model name not supported""" )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
snake_case_ : str = InstructBlipQFormerConfig(vocab_size=3_0_5_2_3 ).to_dict()
snake_case_ : List[str] = InstructBlipConfig(vision_config=__UpperCamelCase , text_config=__UpperCamelCase , qformer_config=__UpperCamelCase )
return config, image_size
@torch.no_grad()
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Optional[Any]=False ):
'''simple docstring'''
snake_case_ : Optional[int] = AutoTokenizer.from_pretrained("""bert-base-uncased""" , truncation_side="""left""" )
qformer_tokenizer.add_special_tokens({"""bos_token""": """[DEC]"""} )
if "t5" in model_name:
snake_case_ : Tuple = TaTokenizerFast.from_pretrained("""google/flan-t5-xl""" , truncation_side="""left""" )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
snake_case_ : Any = LlamaTokenizerFast.from_pretrained(
"""huggyllama/llama-7b""" , truncation_side="""left""" , bos_token="""</s>""" , unk_token="""</s>""" )
tokenizer.add_special_tokens({"""pad_token""": """[PAD]"""} )
snake_case_ , snake_case_ : Union[str, Any] = get_blipa_config(__UpperCamelCase )
snake_case_ : Optional[Any] = InstructBlipForConditionalGeneration(__UpperCamelCase ).eval()
snake_case_ : Tuple = {
"""instructblip-vicuna-7b""": ("""blip2_vicuna_instruct""", """vicuna7b"""),
"""instructblip-vicuna-13b""": ("""blip2_vicuna_instruct""", """vicuna13b"""),
"""instructblip-flan-t5-xl""": ("""blip2_t5_instruct""", """flant5xl"""),
"""instructblip-flan-t5-xxl""": ("""blip2_t5_instruct""", """flant5xxl"""),
}
snake_case_ , snake_case_ : int = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
snake_case_ : List[Any] = """cuda:1""" if torch.cuda.is_available() else """cpu"""
snake_case_ : Dict = """cuda:2""" if torch.cuda.is_available() else """cpu"""
snake_case_ , snake_case_ , snake_case_ : Union[str, Any] = load_model_and_preprocess(
name=__UpperCamelCase , model_type=__UpperCamelCase , is_eval=__UpperCamelCase , device=__UpperCamelCase )
original_model.eval()
print("""Done!""" )
# update state dict keys
snake_case_ : Any = original_model.state_dict()
snake_case_ : List[str] = create_rename_keys(__UpperCamelCase )
for src, dest in rename_keys:
rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
snake_case_ : int = state_dict.pop(__UpperCamelCase )
if key.startswith("""Qformer.bert""" ):
snake_case_ : Any = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
snake_case_ : Tuple = key.replace("""self""" , """attention""" )
if "llm_proj" in key:
snake_case_ : Dict = key.replace("""llm_proj""" , """language_projection""" )
if "t5_proj" in key:
snake_case_ : Dict = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""llm_model""" ):
snake_case_ : Union[str, Any] = key.replace("""llm_model""" , """language_model""" )
if key.startswith("""t5""" ):
snake_case_ : List[str] = key.replace("""t5""" , """language""" )
snake_case_ : Optional[Any] = val
# read in qv biases
read_in_q_v_bias(__UpperCamelCase , __UpperCamelCase )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
snake_case_ : Dict = load_demo_image()
snake_case_ : Optional[Any] = """What is unusual about this image?"""
# create processor
snake_case_ : int = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=__UpperCamelCase , image_std=__UpperCamelCase )
snake_case_ : List[str] = InstructBlipProcessor(
image_processor=__UpperCamelCase , tokenizer=__UpperCamelCase , qformer_tokenizer=__UpperCamelCase , )
snake_case_ : int = processor(images=__UpperCamelCase , text=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# make sure processor creates exact same pixel values
snake_case_ : Tuple = vis_processors["""eval"""](__UpperCamelCase ).unsqueeze(0 ).to(__UpperCamelCase )
snake_case_ : Dict = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , __UpperCamelCase )
original_model.to(__UpperCamelCase )
hf_model.to(__UpperCamelCase )
with torch.no_grad():
if "vicuna" in model_name:
snake_case_ : Any = original_model({"""image""": original_pixel_values, """text_input""": [prompt]} ).logits
snake_case_ : Dict = hf_model(**__UpperCamelCase ).logits
else:
snake_case_ : Union[str, Any] = original_model(
{"""image""": original_pixel_values, """text_input""": [prompt], """text_output""": ["""\n"""]} ).logits
snake_case_ : List[Any] = tokenizer("""\n""" , return_tensors="""pt""" ).input_ids.to(__UpperCamelCase )
snake_case_ : int = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -1_0_0 )
snake_case_ : int = hf_model(**__UpperCamelCase , labels=__UpperCamelCase ).logits
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
snake_case_ : str = 1E-4 if """vicuna""" in model_name else 1E-5
assert torch.allclose(original_logits.to(logits.device ) , __UpperCamelCase , atol=__UpperCamelCase )
print("""Looks ok!""" )
print("""Generating with original model...""" )
snake_case_ : int = original_model.generate({"""image""": original_pixel_values, """prompt""": prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print("""Generating with HF model...""" )
snake_case_ : str = hf_model.generate(
**__UpperCamelCase , do_sample=__UpperCamelCase , num_beams=5 , max_length=2_5_6 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
snake_case_ : Any = 2
print("""Original generation:""" , __UpperCamelCase )
snake_case_ : List[Any] = processor.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
snake_case_ : str = [text.strip() for text in output_text]
print("""HF generation:""" , __UpperCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__UpperCamelCase )
hf_model.save_pretrained(__UpperCamelCase )
if push_to_hub:
processor.push_to_hub(F'Salesforce/{model_name}' )
hf_model.push_to_hub(F'Salesforce/{model_name}' )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
__lowerCAmelCase : Optional[Any] = [
'''instructblip-vicuna-7b''',
'''instructblip-vicuna-13b''',
'''instructblip-flan-t5-xl''',
'''instructblip-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''instructblip-flan-t5-xl''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
__lowerCAmelCase : str = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
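
# --- Illustrative sketch (added for clarity, not part of the original file). The
# conversion above is driven by a (src, dest) rename table applied to the state dict
# via pop-and-reinsert, so each tensor is moved rather than copied. The same pattern
# on a toy dict (names are my own):
import torch

toy_state_dict = {"visual_encoder.cls_token": torch.zeros(1, 1, 4)}
rename_table = [("visual_encoder.cls_token", "vision_model.embeddings.class_embedding")]
for src, dest in rename_table:
    toy_state_dict[dest] = toy_state_dict.pop(src)
assert list(toy_state_dict) == ["vision_model.embeddings.class_embedding"]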
| 21 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=9_9 , _lowercase=3_2 , _lowercase=5 , _lowercase=4 , _lowercase=3_7 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=1_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=4 , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = parent
snake_case_ : Dict = batch_size
snake_case_ : Any = seq_length
snake_case_ : Tuple = is_training
snake_case_ : Dict = use_attention_mask
snake_case_ : int = use_token_type_ids
snake_case_ : List[Any] = use_labels
snake_case_ : List[str] = vocab_size
snake_case_ : str = hidden_size
snake_case_ : Optional[Any] = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : Any = intermediate_size
snake_case_ : Optional[Any] = hidden_act
snake_case_ : Tuple = hidden_dropout_prob
snake_case_ : List[Any] = attention_probs_dropout_prob
snake_case_ : int = max_position_embeddings
snake_case_ : Dict = type_vocab_size
snake_case_ : Dict = type_sequence_label_size
snake_case_ : Optional[Any] = initializer_range
snake_case_ : Tuple = num_choices
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Tuple = None
if self.use_attention_mask:
snake_case_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : List[str] = None
if self.use_token_type_ids:
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Tuple = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Any = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = config_and_inputs
snake_case_ : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Tuple = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = config_and_inputs
snake_case_ : Union[str, Any] = True
snake_case_ : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
snake_case_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = True
_lowerCamelCase = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Any = FlaxRobertaPreLayerNormModelTester(self )
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case_ : Tuple = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : List[str] = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
snake_case_ : str = model(_lowercase )[0]
snake_case_ : int = [1, 1_1, 5_0_2_6_5]
self.assertEqual(list(output.shape ) , _lowercase )
# compare the actual values for a slice.
snake_case_ : Tuple = np.array(
[[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Any = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : Dict = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
snake_case_ : Any = model(_lowercase )[0]
# compare the actual values for a slice.
snake_case_ : Optional[Any] = np.array(
[[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
| 21 | 1 |
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def __lowerCAmelCase ( __UpperCamelCase : Any="ro" , __UpperCamelCase : Union[str, Any]="en" , __UpperCamelCase : Union[str, Any]="wmt16" , __UpperCamelCase : int=None ):
'''simple docstring'''
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("""run pip install datasets""" )
snake_case_ : Tuple = F'{src_lang}-{tgt_lang}'
print(F'Converting {dataset}-{pair}' )
snake_case_ : str = datasets.load_dataset(__UpperCamelCase , __UpperCamelCase )
if save_dir is None:
snake_case_ : List[Any] = F'{dataset}-{pair}'
snake_case_ : List[str] = Path(__UpperCamelCase )
save_dir.mkdir(exist_ok=__UpperCamelCase )
for split in ds.keys():
print(F'Splitting {split} with {ds[split].num_rows} records' )
# to save to val.source, val.target like summary datasets
snake_case_ : Union[str, Any] = """val""" if split == """validation""" else split
snake_case_ : List[str] = save_dir.joinpath(F'{fn}.source' )
snake_case_ : Optional[Any] = save_dir.joinpath(F'{fn}.target' )
snake_case_ : List[str] = src_path.open("""w+""" )
snake_case_ : Dict = tgt_path.open("""w+""" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
snake_case_ : int = x["""translation"""]
src_fp.write(ex[src_lang] + """\n""" )
tgt_fp.write(ex[tgt_lang] + """\n""" )
print(F'Saved {dataset} dataset to {save_dir}' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
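
# --- Illustrative sketch (added for clarity, not part of the original file). The loop
# above fans each translation record out into parallel <split>.source / <split>.target
# files, one sentence per line. The same idea on toy records (paths and names are mine):
import tempfile
from pathlib import Path

records = [{"translation": {"ro": "Salut", "en": "Hello"}}]
out_dir = Path(tempfile.mkdtemp())
with open(out_dir / "val.source", "w") as src_fp, open(out_dir / "val.target", "w") as tgt_fp:
    for x in records:
        src_fp.write(x["translation"]["ro"] + "\n")
        tgt_fp.write(x["translation"]["en"] + "\n")
assert (out_dir / "val.source").read_text() == "Salut\n"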
| 21 |
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
__lowerCAmelCase : Optional[Any] = parser.parse_args()
__lowerCAmelCase : Dict = '''cpu'''
__lowerCAmelCase : Optional[Any] = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
__lowerCAmelCase : Tuple = '''path-to-your-trained-model'''
__lowerCAmelCase : List[Any] = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
__lowerCAmelCase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
__lowerCAmelCase : List[Any] = pipe.to(device)
# to channels last
__lowerCAmelCase : Optional[Any] = pipe.unet.to(memory_format=torch.channels_last)
__lowerCAmelCase : List[str] = pipe.vae.to(memory_format=torch.channels_last)
__lowerCAmelCase : Optional[Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
__lowerCAmelCase : Dict = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
__lowerCAmelCase : Tuple = torch.randn(2, 4, 64, 64)
__lowerCAmelCase : Any = torch.rand(1) * 999
__lowerCAmelCase : List[str] = torch.randn(2, 77, 768)
__lowerCAmelCase : Optional[int] = (sample, timestep, encoder_hidden_states)
try:
__lowerCAmelCase : List[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
__lowerCAmelCase : Optional[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
__lowerCAmelCase : Any = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
__lowerCAmelCase : int = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
__lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
__lowerCAmelCase : List[str] = 666
__lowerCAmelCase : Optional[int] = torch.Generator(device).manual_seed(seed)
__lowerCAmelCase : List[Any] = {'''generator''': generator}
if args.steps is not None:
__lowerCAmelCase : Any = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
__lowerCAmelCase : str = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
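
# --- Illustrative sketch (added for clarity, not part of the original file). The script
# above moves weights to channels-last (NHWC) memory layout before ipex.optimize, which
# is what lets the CPU kernels vectorize well. The layout change only reorders strides,
# as a plain tensor shows:
import torch

x = torch.randn(2, 4, 64, 64)  # NCHW strides by default
x_cl = x.to(memory_format=torch.channels_last)
assert x_cl.is_contiguous(memory_format=torch.channels_last)
assert torch.equal(x, x_cl)  # same values, different stride order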
| 21 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=9_9 , _lowercase=3_2 , _lowercase=5 , _lowercase=4 , _lowercase=3_7 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=1_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=4 , ) -> List[str]:
'''simple docstring'''
snake_case_ : Tuple = parent
snake_case_ : List[str] = batch_size
snake_case_ : int = seq_length
snake_case_ : List[Any] = is_training
snake_case_ : Optional[int] = use_attention_mask
snake_case_ : Optional[Any] = use_token_type_ids
snake_case_ : Union[str, Any] = use_labels
snake_case_ : str = vocab_size
snake_case_ : List[str] = hidden_size
snake_case_ : List[str] = num_hidden_layers
snake_case_ : int = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : List[str] = max_position_embeddings
snake_case_ : Union[str, Any] = type_vocab_size
snake_case_ : str = type_sequence_label_size
snake_case_ : Dict = initializer_range
snake_case_ : str = num_choices
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Tuple = None
if self.use_attention_mask:
snake_case_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : Optional[int] = None
if self.use_token_type_ids:
snake_case_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Union[str, Any] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : str = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = config_and_inputs
snake_case_ : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = True
_lowerCamelCase = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = FlaxRoFormerModelTester(self )
@slow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case_ : Tuple = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=_lowercase )
snake_case_ : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
snake_case_ : Tuple = jnp.array([[0, 1, 2, 3, 4, 5]] )
snake_case_ : Dict = model(_lowercase )[0]
snake_case_ : Optional[int] = 5_0_0_0_0
snake_case_ : Union[str, Any] = (1, 6, vocab_size)
self.assertEqual(output.shape , _lowercase )
snake_case_ : Dict = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
| 21 |
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = RoFormerTokenizer
_lowerCamelCase = RoFormerTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = True
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().setUp()
def UpperCAmelCase__ ( self , **_lowercase ) -> str:
'''simple docstring'''
return self.tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **_lowercase )
def UpperCAmelCase__ ( self , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **_lowercase )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : Tuple = """永和服装饰品有限公司,今天天气非常好"""
snake_case_ : int = """永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"""
return input_text, output_text
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : List[str] = self.get_tokenizer()
snake_case_ , snake_case_ : Optional[Any] = self.get_chinese_input_output_texts()
snake_case_ : List[str] = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , output_text.split() )
snake_case_ : str = tokens + [tokenizer.unk_token]
snake_case_ : Tuple = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : int = self.get_rust_tokenizer()
snake_case_ , snake_case_ : List[Any] = self.get_chinese_input_output_texts()
snake_case_ : Union[str, Any] = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , output_text.split() )
snake_case_ : Optional[int] = tokens + [tokenizer.unk_token]
snake_case_ : Union[str, Any] = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
| 21 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
__lowerCAmelCase : str = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''roformer'''
def __init__( self , _lowercase=5_0_0_0_0 , _lowercase=None , _lowercase=7_6_8 , _lowercase=1_2 , _lowercase=1_2 , _lowercase=3_0_7_2 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=1_5_3_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=1E-12 , _lowercase=0 , _lowercase=False , _lowercase=True , **_lowercase , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=_lowercase , **_lowercase )
snake_case_ : str = vocab_size
snake_case_ : str = hidden_size if embedding_size is None else embedding_size
snake_case_ : Optional[Any] = hidden_size
snake_case_ : Tuple = num_hidden_layers
snake_case_ : Any = num_attention_heads
snake_case_ : str = hidden_act
snake_case_ : Any = intermediate_size
snake_case_ : Tuple = hidden_dropout_prob
snake_case_ : Dict = attention_probs_dropout_prob
snake_case_ : Optional[Any] = max_position_embeddings
snake_case_ : Any = type_vocab_size
snake_case_ : Any = initializer_range
snake_case_ : List[Any] = layer_norm_eps
snake_case_ : str = rotary_value
snake_case_ : List[Any] = use_cache
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
snake_case_ : List[str] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case_ : Any = {0: """batch""", 1: """sequence"""}
snake_case_ : List[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 21 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 1_0 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
snake_case_ : List[Any] = [
2_0_4_7,
1_3_7_3_6_5_3,
2_5_3_2_6_0_0_1,
3_2_1_5_0_3_1_7_5_1,
2_1_5_2_3_0_2_8_9_8_7_4_7,
3_4_7_4_7_4_9_6_6_0_3_8_3,
3_4_1_5_5_0_0_7_1_7_2_8_3_2_1,
1,
3_8_2_5_1_2_3_0_5_6_5_4_6_4_1_3_0_5_1,
1,
1,
3_1_8_6_6_5_8_5_7_8_3_4_0_3_1_1_5_1_1_6_7_4_6_1,
3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1,
]
snake_case_ : Dict = [2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9, 3_1, 3_7, 4_1]
for idx, _p in enumerate(__UpperCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
snake_case_ : Optional[int] = primes[:idx]
break
snake_case_ , snake_case_ : Tuple = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
snake_case_ : List[str] = False
for r in range(__UpperCamelCase ):
snake_case_ : int = pow(__UpperCamelCase , d * 2**r , __UpperCamelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
snake_case_ : Optional[Any] = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def __lowerCAmelCase ( ):
'''simple docstring'''
assert not miller_rabin(5_6_1 )
assert miller_rabin(5_6_3 )
# 2047
assert not miller_rabin(8_3_8_2_0_1 )
assert miller_rabin(8_3_8_2_0_7 )
# 1_373_653
assert not miller_rabin(1_7_3_1_6_0_0_1 )
assert miller_rabin(1_7_3_1_6_0_1_7 )
# 25_326_001
assert not miller_rabin(3_0_7_8_3_8_6_6_4_1 )
assert miller_rabin(3_0_7_8_3_8_6_6_5_3 )
# 3_215_031_751
assert not miller_rabin(1_7_1_3_0_4_5_5_7_4_8_0_1 )
assert miller_rabin(1_7_1_3_0_4_5_5_7_4_8_1_9 )
# 2_152_302_898_747
assert not miller_rabin(2_7_7_9_7_9_9_7_2_8_3_0_7 )
assert miller_rabin(2_7_7_9_7_9_9_7_2_8_3_2_7 )
# 3_474_749_660_383
assert not miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_4_4_1 )
assert miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_5_2_7 )
# 341_550_071_728_321
assert not miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_5_1 )
assert miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_9_1 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_8_6_7 )
assert miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_9_5_1 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_3_3 )
assert miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_5_9 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
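# Hedged example (added for illustration): spot checks of the restored
# miller_rabin function on a prime, a Carmichael number, and a larger prime.
if __name__ == "__main__":
    for candidate in (97, 561, 7919):
        print(candidate, miller_rabin(candidate))  # True, False, True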
| 21 | 1 |
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    '''simple docstring'''
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    '''simple docstring'''
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def _center_text(text, width):
    '''simple docstring'''
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    '''simple docstring'''
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    '''simple docstring'''
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
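# Typical invocations (from the repo root, per the header comment above):
#   python utils/check_table.py                      # check only; raises on mismatch
#   python utils/check_table.py --fix_and_overwrite  # rewrite docs/source/en/index.md in place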
| 21 |
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    '''simple docstring'''

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    '''simple docstring'''
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    '''simple docstring'''

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    '''simple docstring'''

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
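# Hedged example (illustration only): the generic estimator above applied to
# the integral of x**2 on [0, 1], whose true value is 1/3.
if __name__ == "__main__":
    estimate = area_under_curve_estimator(100_000, lambda x: x * x, 0.0, 1.0)
    print(f"Estimated integral of x^2 on [0, 1]: {estimate} (expected ~0.3333)")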
| 21 | 1 |
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """simple docstring"""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        '''simple docstring'''
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        '''simple docstring'''
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        '''simple docstring'''
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        '''simple docstring'''
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        '''simple docstring'''
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    """simple docstring"""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """simple docstring"""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """simple docstring"""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """simple docstring"""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    """simple docstring"""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
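# Hedged usage sketch (not part of the original module): reading a local
# gzip-compressed text file through the filesystem classes above. Assumes a
# file named "file.txt.gz" exists in the working directory.
if __name__ == "__main__":
    fs = GzipFileSystem(fo="file.txt.gz")
    print(fs.ls("/"))          # single entry named after the uncompressed file
    print(fs.cat("file.txt"))  # decompressed bytes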
| 21 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    '''simple docstring'''
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    """simple docstring"""

    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        '''simple docstring'''
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        '''simple docstring'''
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        '''simple docstring'''
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
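# Hedged illustration (added): the cosine_distance helper above on random
# embeddings, showing the shape of the resulting similarity matrix.
if __name__ == "__main__":
    a = torch.randn(2, 768)   # e.g. two image embeddings
    b = torch.randn(17, 768)  # e.g. the 17 concept embeddings
    print(cosine_distance(a, b).shape)  # torch.Size([2, 17])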
| 21 | 1 |
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    '''simple docstring'''
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    '''simple docstring'''
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    '''simple docstring'''
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
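# Hedged usage sketch of the decorator above (mirrors the documented pattern):
# the wrapped function receives the current batch size as its first argument
# and is retried with half the batch size whenever an OOM error is detected.
if __name__ == "__main__":
    @find_executable_batch_size(starting_batch_size=128)
    def train(batch_size):
        print(f"Trying batch size {batch_size}")
        # ... build dataloaders / run a training step with `batch_size` here ...

    train()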
| 21 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    '''simple docstring'''
    embed = []
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
F'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
F'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
F'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
F'stage{idx}.patch_embed.norm.bias',
) )
return embed
def attention(idx, cnt):
    '''simple docstring'''
    attention_weights = []
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
F'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
F'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', F'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', F'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', F'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', F'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def cls_token(idx):
    '''simple docstring'''
    token = []
token.append((F'cvt.encoder.stages.{idx}.cls_token', """stage2.cls_token""") )
return token
def final():
    '''simple docstring'''
    head = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    '''simple docstring'''
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]

    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]

    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Input Image Size''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
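# Example invocation (illustrative only; the script filename is assumed and
# the weights come from the zoo link in the comment above):
#   python convert_cvt_original_pytorch_checkpoint_to_pytorch.py \
#       --cvt_model cvt-w24 --image_size 384 \
#       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24-384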
| 21 | 1 |
"""simple docstring"""
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    '''simple docstring'''
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    '''simple docstring'''
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
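# Example invocation (illustrative; the checkpoint path is a placeholder):
#   python convert_original_tf_checkpoint_to_pytorch.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224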
| 21 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        '''simple docstring'''
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        '''simple docstring'''
        pass

    def test_add_special_tokens(self):
        '''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        '''simple docstring'''
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        '''simple docstring'''
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        '''simple docstring'''
        pass
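# Hedged illustration of what these tests exercise: MGP-STR is a
# character-level tokenizer, so "tester" maps to one id per character. The
# checkpoint id below is an assumption, not taken from this file.
#   tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
#   tokenizer("tester")["input_ids"]  # six ids, one per letter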
| 21 | 1 |
"""simple docstring"""
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    """simple docstring"""

    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        '''simple docstring'''
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        '''simple docstring'''
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        '''simple docstring'''
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        '''simple docstring'''
        return cls(**config)

    def get_config(self):
        '''simple docstring'''
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        '''simple docstring'''
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
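# Hedged usage sketch (added for illustration, not part of the original layer):
# building the in-graph tokenizer from a pretrained GPT-2 tokenizer and calling
# it on a batch of strings.
if __name__ == "__main__":
    tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
    batch = tf_tokenizer(tf.constant(["hello world"]))
    print(batch["input_ids"], batch["attention_mask"])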
| 21 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""

    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        '''simple docstring'''
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_inference_masked_lm(self):
        '''simple docstring'''
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
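# To run just these tests (illustrative; the test-file path is assumed):
#   pytest tests/models/roformer/test_modeling_flax_roformer.py -k "RoFormer"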
| 21 | 1 |
"""simple docstring"""
def solution(n: int = 1000) -> int:
    '''simple docstring'''
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(F'''{solution() = }''')
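# Hedged alternative sketch (added for illustration): the same Project Euler
# sum has a closed form via arithmetic series, using inclusion-exclusion over
# multiples of 3, 5, and 15. For n=1000 both versions return 233168.
def solution_closed_form(n: int = 1000) -> int:
    def tri(k: int) -> int:
        # sum of all positive multiples of k strictly below n
        m = (n - 1) // k
        return k * m * (m + 1) // 2

    return tri(3) + tri(5) - tri(15)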
| 21 |
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def dataset():
    '''simple docstring'''
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset
@pytest.fixture(scope="""session""" )
def arrow_file(tmp_path_factory, dataset):
    '''simple docstring'''
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files
FILE_CONTENT = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope="""session""" )
def text_file(tmp_path_factory):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename
@pytest.fixture(scope="""session""" )
def bz2_file(tmp_path_factory):
    '''simple docstring'''
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="""session""" )
def gz_file(tmp_path_factory):
    '''simple docstring'''
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="""session""" )
def lz4_file(tmp_path_factory):
    '''simple docstring'''
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
    return path
@pytest.fixture(scope="""session""" )
def seven_zip_file(tmp_path_factory, text_file):
    '''simple docstring'''
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope="""session""" )
def tar_file(tmp_path_factory, text_file):
    '''simple docstring'''
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope="""session""" )
def xz_file(tmp_path_factory):
    '''simple docstring'''
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="""session""" )
def zip_file(tmp_path_factory, text_file):
    '''simple docstring'''
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope="""session""" )
def zstd_file(tmp_path_factory):
    '''simple docstring'''
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
    return path
@pytest.fixture(scope="""session""" )
def xml_file(tmp_path_factory):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
    with open(filename, "w") as f:
        f.write(data)
    return filename
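# Illustrative note (added): tests consume these session-scoped fixtures by
# declaring parameters with matching names, e.g. (hypothetical test):
#
#   def test_read_text(text_file):
#       assert open(text_file).read() == FILE_CONTENT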
DATA = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
DATA2 = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
DATA_DICT_OF_LISTS = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
DATA_STR = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope="""session""" )
def dataset_dict():
    '''simple docstring'''
    return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def arrow_path(tmp_path_factory):
    '''simple docstring'''
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path
@pytest.fixture(scope="""session""" )
def sqlite_path(tmp_path_factory):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path
@pytest.fixture(scope="""session""" )
def csv_path(tmp_path_factory):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope="""session""" )
def csv2_path(tmp_path_factory):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( csv_path , tmp_path_factory ):
    '''simple docstring'''
    import bz2
    path = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
    with open(csv_path , """rb""" ) as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path , """wb""" ) as f:
        f.write(data )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( csv_path , csva_path , tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
    with zipfile.ZipFile(path , """w""" ) as f:
        f.write(csv_path , arcname=os.path.basename(csv_path ) )
        f.write(csva_path , arcname=os.path.basename(csva_path ) )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( csv_path , csva_path , tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
    with zipfile.ZipFile(path , """w""" ) as f:
        f.write(csv_path , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
        f.write(csva_path , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( csv_path , csva_path , tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
    with zipfile.ZipFile(path , """w""" ) as f:
        f.write(csv_path , arcname=os.path.join("""main_dir""" , os.path.basename(csv_path ) ) )
        f.write(csva_path , arcname=os.path.join("""main_dir""" , os.path.basename(csva_path ) ) )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( tmp_path_factory ):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
    schema = pa.schema(
        {
            """col_1""": pa.string(),
            """col_2""": pa.int64(),
            """col_3""": pa.float64(),
        } )
    with open(path , """wb""" ) as f:
        writer = pq.ParquetWriter(f , schema=schema )
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA ) )] for k in DATA[0]} , schema=schema )
        writer.write_table(pa_table )
        writer.close()
    return path
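# Hedged round-trip check (ours; `parquet_path` is a hypothetical name for the
# value returned above):
#   pq.read_table(parquet_path).to_pydict() == {k: [row[k] for row in DATA] for k in DATA[0]}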
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( tmp_path_factory ):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
    data = {"""data""": DATA}
    with open(path , """w""" ) as f:
        json.dump(data , f )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( tmp_path_factory ):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
    data = {"""data""": DATA_DICT_OF_LISTS}
    with open(path , """w""" ) as f:
        json.dump(data , f )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( tmp_path_factory ):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
    with open(path , """w""" ) as f:
        for item in DATA:
            f.write(json.dumps(item ) + """\n""" )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( tmp_path_factory ):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
    with open(path , """w""" ) as f:
        for item in DATA:
            f.write(json.dumps(item ) + """\n""" )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( tmp_path_factory ):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
    with open(path , """w""" ) as f:
        for item in DATA_312:
            f.write(json.dumps(item ) + """\n""" )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( tmp_path_factory ):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
    with open(path , """w""" ) as f:
        for item in DATA_STR:
            f.write(json.dumps(item ) + """\n""" )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( tmp_path_factory , text_path ):
    '''simple docstring'''
    import gzip
    path = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
    with open(text_path , """rb""" ) as orig_file:
        with gzip.open(path , """wb""" ) as zipped_file:
            zipped_file.writelines(orig_file )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( tmp_path_factory , jsonl_path ):
    '''simple docstring'''
    import gzip
    path = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
    with open(jsonl_path , """rb""" ) as orig_file:
        with gzip.open(path , """wb""" ) as zipped_file:
            zipped_file.writelines(orig_file )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( jsonl_path , jsonla_path , tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
    with zipfile.ZipFile(path , """w""" ) as f:
        f.write(jsonl_path , arcname=os.path.basename(jsonl_path ) )
        f.write(jsonla_path , arcname=os.path.basename(jsonla_path ) )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( zip_jsonl_path , jsonl_path , jsonla_path , tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
    with zipfile.ZipFile(path , """w""" ) as f:
        f.write(zip_jsonl_path , arcname=os.path.join("""nested""" , os.path.basename(zip_jsonl_path ) ) )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( jsonl_path , jsonla_path , tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
    with zipfile.ZipFile(path , """w""" ) as f:
        f.write(jsonl_path , arcname=os.path.join("""main_dir""" , os.path.basename(jsonl_path ) ) )
        f.write(jsonla_path , arcname=os.path.join("""main_dir""" , os.path.basename(jsonla_path ) ) )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( jsonl_path , jsonla_path , tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
    with tarfile.TarFile(path , """w""" ) as f:
        f.add(jsonl_path , arcname=os.path.basename(jsonl_path ) )
        f.add(jsonla_path , arcname=os.path.basename(jsonla_path ) )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( tar_jsonl_path , jsonl_path , jsonla_path , tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
    with tarfile.TarFile(path , """w""" ) as f:
        f.add(tar_jsonl_path , arcname=os.path.join("""nested""" , os.path.basename(tar_jsonl_path ) ) )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( tmp_path_factory ):
    '''simple docstring'''
    data = ["""0""", """1""", """2""", """3"""]
    path = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
    with open(path , """w""" ) as f:
        for item in data:
            f.write(item + """\n""" )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( tmp_path_factory ):
    '''simple docstring'''
    data = ["""0""", """1""", """2""", """3"""]
    path = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
    with open(path , """w""" ) as f:
        for item in data:
            f.write(item + """\n""" )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( tmp_path_factory ):
    '''simple docstring'''
    data = ["""0""", """1""", """2""", """3"""]
    path = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
    with open(path , """w""" ) as f:
        for item in data:
            f.write(item + """\n""" )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( text_path , texta_path , tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
    with zipfile.ZipFile(path , """w""" ) as f:
        f.write(text_path , arcname=os.path.basename(text_path ) )
        f.write(texta_path , arcname=os.path.basename(texta_path ) )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( text_path , texta_path , tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
    with zipfile.ZipFile(path , """w""" ) as f:
        f.write(text_path , arcname=os.path.join("""main_dir""" , os.path.basename(text_path ) ) )
        f.write(texta_path , arcname=os.path.join("""main_dir""" , os.path.basename(texta_path ) ) )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( csv_path , csva_path , tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
    with zipfile.ZipFile(path , """w""" ) as f:
        f.write(csv_path , arcname=os.path.basename("""unsupported.ext""" ) )
        f.write(csva_path , arcname=os.path.basename("""unsupported_2.ext""" ) )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( tmp_path_factory ):
    '''simple docstring'''
    text = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
    path = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
    with open(path , """w""" , encoding="""utf-8""" ) as f:
        f.write(text )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( image_file , tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
    with zipfile.ZipFile(path , """w""" ) as f:
        f.write(image_file , arcname=os.path.basename(image_file ) )
        f.write(image_file , arcname=os.path.basename(image_file ).replace(""".jpg""" , """2.jpg""" ) )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( tmp_path_factory ):
    '''simple docstring'''
    data_dir = tmp_path_factory.mktemp("""data_dir""" )
    (data_dir / "subdir").mkdir()
    with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
        f.write("""foo\n""" * 1_0 )
    with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
        f.write("""bar\n""" * 1_0 )
    # hidden file
    with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
        f.write("""bar\n""" * 1_0 )
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
        f.write("""foo\n""" * 1_0 )
    with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
        f.write("""bar\n""" * 1_0 )
    return data_dir
| 21 | 1 |
"""simple docstring"""
from functools import reduce
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution ( n : str = N ):
    '''simple docstring'''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , n[i : i + 1_3] ) )
        for i in range(len(n ) - 1_2 ) )
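# A minimal cross-check of the same sliding-window product (ours, assuming
# Python >= 3.8 for math.prod; the helper name below is hypothetical):
def _solution_with_prod(n: str = N) -> int:
    from math import prod
    return max(prod(int(digit) for digit in n[i : i + 1_3]) for i in range(len(n) - 1_2))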
if __name__ == "__main__":
print(F'''{solution() = }''')
| 21 |
"""simple docstring"""
def __lowerCAmelCase ( a : int , b : int ):
    '''
    Return the bitwise XOR of two non-negative integers as a binary string.
    >>> __lowerCAmelCase(25, 32)
    '0b111001'
    '''
    if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be positive""" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
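# Quick sanity check (ours, with hypothetical values): any number XORed with itself is zero.
assert int(__lowerCAmelCase(2_5 , 2_5 ) , 2 ) == 0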
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 1 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image ( ):
    '''simple docstring'''
    url = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
    image = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    return image
def create_rename_keys ( config ):
'''simple docstring'''
snake_case_ : Optional[int] = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.weight', F'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.bias', F'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.weight', F'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.bias', F'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.qkv.weight', F'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.weight', F'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.bias', F'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.weight', F'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.bias', F'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.weight', F'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.bias', F'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key ( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias ( state_dict , config ):
    '''simple docstring'''
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(F'visual_encoder.blocks.{i}.attn.q_bias' )
        v_bias = state_dict.pop(F'visual_encoder.blocks.{i}.attn.v_bias' )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[F'visual_encoder.blocks.{i}.attn.qkv.bias'] = qkv_bias
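# Note on the fused bias layout (our sketch, based on the concat order above): the
# vision attention uses a single qkv projection whose bias is [q_bias, 0, v_bias];
# the key bias stays zero, which is why zeros_like(v_bias) is spliced in the middle.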
def get_blipa_config ( model_name , eos_token_id ):
    '''simple docstring'''
    image_size = 3_6_4 if """coco""" in model_name else 2_2_4
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint ( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
'''simple docstring'''
snake_case_ : Dict = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
snake_case_ : Dict = tokenizer("""\n""" , add_special_tokens=__UpperCamelCase ).input_ids[0]
snake_case_ , snake_case_ : List[str] = get_blipa_config(__UpperCamelCase , eos_token_id=__UpperCamelCase )
snake_case_ : Tuple = BlipaForConditionalGeneration(__UpperCamelCase ).eval()
snake_case_ : Any = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
snake_case_ , snake_case_ : List[str] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
snake_case_ : Optional[Any] = """cuda""" if torch.cuda.is_available() else """cpu"""
snake_case_ , snake_case_ , snake_case_ : List[Any] = load_model_and_preprocess(
name=__UpperCamelCase , model_type=__UpperCamelCase , is_eval=__UpperCamelCase , device=__UpperCamelCase )
original_model.eval()
print("""Done!""" )
# update state dict keys
snake_case_ : Any = original_model.state_dict()
snake_case_ : List[str] = create_rename_keys(__UpperCamelCase )
for src, dest in rename_keys:
rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
snake_case_ : Any = state_dict.pop(__UpperCamelCase )
if key.startswith("""Qformer.bert""" ):
snake_case_ : Any = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
snake_case_ : List[Any] = key.replace("""self""" , """attention""" )
if "opt_proj" in key:
snake_case_ : str = key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
snake_case_ : Tuple = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
snake_case_ : Dict = key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
snake_case_ : Any = key.replace("""t5""" , """language""" )
snake_case_ : Optional[int] = val
# read in qv biases
read_in_q_v_bias(__UpperCamelCase , __UpperCamelCase )
snake_case_ , snake_case_ : Any = hf_model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
assert len(__UpperCamelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
snake_case_ : List[str] = load_demo_image()
snake_case_ : Any = vis_processors["""eval"""](__UpperCamelCase ).unsqueeze(0 ).to(__UpperCamelCase )
snake_case_ : Dict = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(__UpperCamelCase )
# create processor
snake_case_ : int = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=__UpperCamelCase , image_std=__UpperCamelCase )
snake_case_ : List[str] = BlipaProcessor(image_processor=__UpperCamelCase , tokenizer=__UpperCamelCase )
snake_case_ : List[Any] = processor(images=__UpperCamelCase , return_tensors="""pt""" ).pixel_values.to(__UpperCamelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(__UpperCamelCase , __UpperCamelCase )
original_model.to(__UpperCamelCase )
hf_model.to(__UpperCamelCase )
with torch.no_grad():
if "opt" in model_name:
snake_case_ : List[Any] = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
snake_case_ : Any = hf_model(__UpperCamelCase , __UpperCamelCase ).logits
else:
snake_case_ : Tuple = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
snake_case_ : List[Any] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_0_0 )
snake_case_ : int = hf_model(__UpperCamelCase , __UpperCamelCase , labels=__UpperCamelCase ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
snake_case_ : Optional[Any] = torch.tensor(
[[-41.5_850, -4.4_440, -8.9_922], [-47.4_322, -5.9_143, -1.7_340]] , device=__UpperCamelCase )
assert torch.allclose(logits[0, :3, :3] , __UpperCamelCase , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
snake_case_ : Any = torch.tensor(
[[-57.0_109, -9.8_967, -12.6_280], [-68.6_578, -12.7_191, -10.5_065]] , device=__UpperCamelCase )
else:
# cast to same type
snake_case_ : List[Any] = logits.dtype
assert torch.allclose(original_logits.to(__UpperCamelCase ) , __UpperCamelCase , atol=1E-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
snake_case_ : Optional[int] = """"""
snake_case_ : int = tokenizer(__UpperCamelCase , return_tensors="""pt""" ).input_ids.to(__UpperCamelCase )
snake_case_ : Any = original_model.generate({"""image""": original_pixel_values} )
snake_case_ : Optional[int] = hf_model.generate(
__UpperCamelCase , __UpperCamelCase , do_sample=__UpperCamelCase , num_beams=5 , max_length=3_0 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , __UpperCamelCase )
snake_case_ : Optional[int] = input_ids.shape[1]
snake_case_ : Union[str, Any] = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__UpperCamelCase )
snake_case_ : Union[str, Any] = [text.strip() for text in output_text]
print("""HF generation:""" , __UpperCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__UpperCamelCase )
hf_model.save_pretrained(__UpperCamelCase )
if push_to_hub:
processor.push_to_hub(F'nielsr/{model_name}' )
hf_model.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
choices = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 21 |
"""simple docstring"""
def __lowerCAmelCase ( input_str : str ):
    '''
    Reverse the order of the words in a string.
    >>> __lowerCAmelCase('I love Python')
    'Python love I'
    '''
    return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 1 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@register_to_config
    def __init__( self , max_length , vocab_size , d_model , dropout_rate , num_layers , num_heads , d_kv , d_ff , feed_forward_proj , is_decoder = False , ):
        '''simple docstring'''
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size , d_model )
        self.position_encoding = nn.Embedding(max_length , d_model )
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate )
        t5config = TaConfig(
            vocab_size=vocab_size , d_model=d_model , num_heads=num_heads , d_kv=d_kv , d_ff=d_ff , dropout_rate=dropout_rate , feed_forward_proj=feed_forward_proj , is_decoder=is_decoder , is_encoder_decoder=False , )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            lyr = TaBlock(t5config )
            self.encoders.append(lyr )
        self.layer_norm = TaLayerNorm(d_model )
        self.dropout_post = nn.Dropout(p=dropout_rate )
    def forward( self , encoder_input_tokens , encoder_inputs_mask ):
        '''simple docstring'''
        x = self.token_embedder(encoder_input_tokens )
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length , device=encoder_input_tokens.device )
        x += self.position_encoding(inputs_positions )
        x = self.dropout_pre(x )
        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask , input_shape )
        for lyr in self.encoders:
            x = lyr(x , extended_attention_mask )[0]
        x = self.layer_norm(x )
        return self.dropout_post(x ), encoder_inputs_mask
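# Minimal usage sketch (ours; the sizes below are hypothetical, not from this file):
#   enc = _lowerCAmelCase(max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1,
#                         num_layers=12, num_heads=12, d_kv=64, d_ff=2048,
#                         feed_forward_proj="gated-gelu")
#   tokens = torch.zeros(1, 2048, dtype=torch.long); mask = torch.ones(1, 2048)
#   x, mask_out = enc(tokens, mask)   # x has shape (1, 2048, 768)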
| 21 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''nat'''
_lowerCamelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
    def __init__( self , patch_size=4 , num_channels=3 , embed_dim=6_4 , depths=[3, 4, 6, 5] , num_heads=[2, 4, 8, 1_6] , kernel_size=7 , mlp_ratio=3.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1E-5 , layer_scale_init_value=0.0 , out_features=None , out_indices=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
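# Usage sketch (our comment): with the defaults above, num_layers == 4,
# hidden_size == int(64 * 2 ** 3) == 512 (the channel width after the last stage),
# and stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"].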
| 21 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester ( unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=1_8 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , apply_ocr=True , ):
        '''simple docstring'''
        size = size if size is not None else {"""height""": 1_8, """width""": 1_8}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict( self ):
        '''simple docstring'''
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """do_resize""" ) )
        self.assertTrue(hasattr(image_processing , """size""" ) )
        self.assertTrue(hasattr(image_processing , """apply_ocr""" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
        self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
snake_case_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , _lowercase )
self.assertIsInstance(encoding.boxes , _lowercase )
# Test batched
snake_case_ : Union[str, Any] = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
snake_case_ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case_ : Union[str, Any] = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
snake_case_ : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case_ : Dict = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[Any] = LayoutLMvaImageProcessor()
from datasets import load_dataset
snake_case_ : Dict = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
snake_case_ : Any = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
snake_case_ : List[str] = image_processing(_lowercase , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
snake_case_ : Optional[int] = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
snake_case_ : int = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 
5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _lowercase )
self.assertListEqual(encoding.boxes , _lowercase )
# with apply_OCR = False
snake_case_ : int = LayoutLMvaImageProcessor(apply_ocr=_lowercase )
snake_case_ : int = image_processing(_lowercase , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
| 21 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
__lowerCAmelCase : Optional[Any] = False
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
return 1_2
@property
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return 1_2
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return 3_2
@property
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : List[Any] = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(_lowercase )
@property
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = 1_2
snake_case_ : Tuple = 1_2
snake_case_ : Tuple = {
"""attention_bias""": True,
"""cross_attention_dim""": 3_2,
"""attention_head_dim""": height * width,
"""num_attention_heads""": 1,
"""num_vector_embeds""": self.num_embed,
"""num_embeds_ada_norm""": self.num_embeds_ada_norm,
"""norm_num_groups""": 3_2,
"""sample_size""": width,
"""activation_fn""": """geglu-approximate""",
}
snake_case_ : Optional[Any] = TransformeraDModel(**_lowercase )
return model
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = """cpu"""
snake_case_ : List[str] = self.dummy_vqvae
snake_case_ : Any = self.dummy_text_encoder
snake_case_ : Tuple = self.dummy_tokenizer
snake_case_ : int = self.dummy_transformer
snake_case_ : int = VQDiffusionScheduler(self.num_embed )
snake_case_ : Dict = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowercase )
snake_case_ : Optional[Any] = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
snake_case_ : int = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : List[Any] = """teddy bear playing in the pool"""
snake_case_ : Dict = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : List[Any] = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="""np""" )
snake_case_ : Optional[int] = output.images
snake_case_ : List[Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Dict = pipe(
[prompt] , generator=_lowercase , output_type="""np""" , return_dict=_lowercase , num_inference_steps=2 )[0]
snake_case_ : List[Any] = image[0, -3:, -3:, -1]
snake_case_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
snake_case_ : Dict = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : int = """cpu"""
snake_case_ : List[Any] = self.dummy_vqvae
snake_case_ : Optional[int] = self.dummy_text_encoder
snake_case_ : List[Any] = self.dummy_tokenizer
snake_case_ : Union[str, Any] = self.dummy_transformer
snake_case_ : str = VQDiffusionScheduler(self.num_embed )
snake_case_ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(
learnable=_lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
snake_case_ : Union[str, Any] = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
snake_case_ : Any = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : Tuple = """teddy bear playing in the pool"""
snake_case_ : str = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Tuple = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="""np""" )
snake_case_ : Dict = output.images
snake_case_ : Union[str, Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Any = pipe(
[prompt] , generator=_lowercase , output_type="""np""" , return_dict=_lowercase , num_inference_steps=2 )[0]
snake_case_ : Optional[Any] = image[0, -3:, -3:, -1]
snake_case_ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
snake_case_ : int = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" )
snake_case_ : str = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" )
snake_case_ : Optional[Any] = pipeline.to(_lowercase )
pipeline.set_progress_bar_config(disable=_lowercase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case_ : Any = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Optional[int] = pipeline(
"""teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=_lowercase , output_type="""np""" , )
snake_case_ : Union[str, Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 21 | 1 |
"""simple docstring"""
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs ( graph , s , t , parent ):
    '''simple docstring'''
    visited = [False] * len(graph )
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut ( graph , source , sink ):
    '''simple docstring'''
    parent = [-1] * len(graph )
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph , source , sink , parent ):
        path_flow = float("""Inf""" )
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph ) ):
        for j in range(len(graph[0] ) ):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j) )
    return res
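# By max-flow/min-cut duality, the capacity of the cut returned above equals the
# maximum s-t flow. A hedged helper for that check (ours, not part of the original;
# it must be given an *unmodified* copy of the capacities, since mincut mutates `graph`):
def _cut_capacity(original_graph, cut_edges):
    return sum(original_graph[u][v] for u, v in cut_edges)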
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 21 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print ( name , val , spaces=0 ):
    '''simple docstring'''
    if name is None:
        msg = None
    else:
        fmt = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(5_0 - spaces ) + """s}"""
        msg = fmt.format(name )
    # Print and recurse (if needed).
    if isinstance(val , dict ):
        if msg is not None:
            print(msg )
        for k in val.keys():
            recursive_print(k , val[k] , spaces + 2 )
    elif isinstance(val , torch.Tensor ):
        print(msg , """:""" , val.size() )
    else:
        print(msg , """:""" , val )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : Any = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
snake_case_ : List[str] = (num_heads, hidden_size, num_splits) + input_shape[1:]
snake_case_ : Tuple = param.view(*__UpperCamelCase )
snake_case_ : Tuple = param.transpose(0 , 2 )
snake_case_ : Any = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
snake_case_ : Optional[Any] = (num_heads, num_splits, hidden_size) + input_shape[1:]
snake_case_ : str = param.view(*__UpperCamelCase )
snake_case_ : Dict = param.transpose(0 , 1 ).contiguous()
    # Restore the original 2-D shape for both branches (the v1.0 path needs this too).
    snake_case_ : int = param.view(*__UpperCamelCase )
    return param
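# A minimal standalone sketch of the layout fix above for checkpoint_version >= 2.0:
# Megatron stores the fused QKV as [num_heads * num_splits * hidden, :] and the
# conversion swaps the heads and splits axes before flattening back. The helper
# name and toy sizes are illustrative only.
def _qkv_reorder_sketch():
    num_heads, num_splits, hidden_size = 2, 3, 4
    param = torch.arange(num_heads * num_splits * hidden_size, dtype=torch.float32).view(-1, 1)
    input_shape = param.size()
    saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
    reordered = param.view(*saved_shape).transpose(0, 1).contiguous().view(*input_shape)
    return reordered  # same shape, rows regrouped as (split, head, hidden)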
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Dict = {}
# old versions did not store training args
snake_case_ : List[str] = input_state_dict.get("""args""" , __UpperCamelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
snake_case_ : Tuple = ds_args.padded_vocab_size
snake_case_ : Optional[int] = ds_args.max_position_embeddings
snake_case_ : Union[str, Any] = ds_args.hidden_size
snake_case_ : Union[str, Any] = ds_args.num_layers
snake_case_ : str = ds_args.num_attention_heads
snake_case_ : str = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
snake_case_ : Union[str, Any] = config.n_head
# The hidden_size per head.
snake_case_ : Optional[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
snake_case_ : Optional[Any] = input_state_dict["""checkpoint_version"""]
else:
snake_case_ : int = 0.0
# The model.
snake_case_ : List[str] = input_state_dict["""model"""]
# The language model.
snake_case_ : str = model["""language_model"""]
# The embeddings.
snake_case_ : Tuple = lm["""embedding"""]
# The word embeddings.
snake_case_ : List[str] = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
snake_case_ : Optional[int] = word_embeddings[: config.vocab_size, :]
snake_case_ : Optional[int] = word_embeddings
# The position embeddings.
snake_case_ : List[Any] = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
snake_case_ : Tuple = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match' )
# Store the position embeddings.
snake_case_ : Union[str, Any] = pos_embeddings
# The transformer.
snake_case_ : Optional[Any] = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
snake_case_ : Optional[Any] = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
snake_case_ : List[str] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
snake_case_ : int = layer_re.match(__UpperCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
snake_case_ : Tuple = int(m.group(1 ) )
# The name of the operation.
snake_case_ : Any = m.group(2 )
# Is it a weight or a bias?
snake_case_ : Union[str, Any] = m.group(3 )
# The name of the layer.
snake_case_ : str = F'transformer.h.{layer_idx}'
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
snake_case_ : Dict = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
snake_case_ : Optional[int] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
snake_case_ : Optional[Any] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , __UpperCamelCase , __UpperCamelCase )
snake_case_ : List[Any] = causal_mask
# Insert a "dummy" tensor for masked_bias.
snake_case_ : str = torch.tensor(-1E4 , dtype=torch.floataa )
snake_case_ : List[Any] = masked_bias
snake_case_ : Optional[int] = fix_query_key_value_ordering(__UpperCamelCase , __UpperCamelCase , 3 , __UpperCamelCase , __UpperCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
snake_case_ : str = out_val.transpose(0 , 1 ).contiguous()
# Store.
snake_case_ : Tuple = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
snake_case_ : Optional[Any] = fix_query_key_value_ordering(__UpperCamelCase , __UpperCamelCase , 3 , __UpperCamelCase , __UpperCamelCase )
# Store. No change of shape.
snake_case_ : List[Any] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
snake_case_ : Any = megatron_to_transformers[op_name]
snake_case_ : str = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
snake_case_ : List[str] = megatron_to_transformers[op_name]
snake_case_ : Tuple = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
snake_case_ : Dict = transformer["""final_layernorm.weight"""]
snake_case_ : Dict = transformer["""final_layernorm.bias"""]
    # For the LM head, transformers expects the weight matrix to be tied to the word embeddings.
snake_case_ : Optional[int] = word_embeddings
# It should be done!
return output_state_dict
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[str] = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" , type=__UpperCamelCase , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
parser.add_argument(
"""--config_file""" , default="""""" , type=__UpperCamelCase , help="""An optional config json file describing the pre-trained model.""" , )
snake_case_ : str = parser.parse_args()
# Extract the basename.
snake_case_ : Optional[Any] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F'Extracting PyTorch state dictionary from {args.path_to_checkpoint}' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
snake_case_ : Optional[int] = torch.load(__UpperCamelCase , map_location="""cpu""" )
else:
snake_case_ : List[Any] = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
snake_case_ : Any = input_state_dict.get("""args""" , __UpperCamelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
snake_case_ : Any = """gelu_fast"""
elif ds_args.openai_gelu:
snake_case_ : Tuple = """gelu_new"""
else:
snake_case_ : List[str] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
snake_case_ : Dict = """gelu_new"""
# Spell out all parameters in case the defaults change.
snake_case_ : List[str] = GPTaConfig(
vocab_size=5_0_2_5_7 , n_positions=1_0_2_4 , n_embd=1_0_2_4 , n_layer=2_4 , n_head=1_6 , n_inner=4_0_9_6 , activation_function=__UpperCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=__UpperCamelCase , summary_activation=__UpperCamelCase , summary_proj_to_labels=__UpperCamelCase , summary_first_dropout=0.1 , scale_attn_weights=__UpperCamelCase , use_cache=__UpperCamelCase , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , )
else:
snake_case_ : List[Any] = GPTaConfig.from_json_file(args.config_file )
snake_case_ : int = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
snake_case_ : Tuple = convert_megatron_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(__UpperCamelCase , __UpperCamelCase )
# Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906
if ds_args is not None:
snake_case_ : str = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
snake_case_ : Optional[Any] = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
snake_case_ : str = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'Unrecognized tokenizer_type {tokenizer_type}' )
else:
snake_case_ : List[str] = """gpt2"""
snake_case_ : List[Any] = AutoTokenizer.from_pretrained(__UpperCamelCase )
snake_case_ : List[str] = type(__UpperCamelCase ).__name__
snake_case_ : Optional[int] = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(__UpperCamelCase )
# Save tokenizer based on args
print(F'Adding {tokenizer_class} tokenizer files' )
tokenizer.save_pretrained(__UpperCamelCase )
# Store the state_dict to file.
snake_case_ : List[Any] = os.path.join(__UpperCamelCase , """pytorch_model.bin""" )
print(F'Saving checkpoint to "{output_checkpoint_file}"' )
torch.save(__UpperCamelCase , __UpperCamelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 21 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''nat'''
_lowerCamelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _lowercase=4 , _lowercase=3 , _lowercase=6_4 , _lowercase=[3, 4, 6, 5] , _lowercase=[2, 4, 8, 1_6] , _lowercase=7 , _lowercase=3.0 , _lowercase=True , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.1 , _lowercase="gelu" , _lowercase=0.02 , _lowercase=1E-5 , _lowercase=0.0 , _lowercase=None , _lowercase=None , **_lowercase , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : Any = patch_size
snake_case_ : Optional[Any] = num_channels
snake_case_ : Optional[Any] = embed_dim
snake_case_ : Tuple = depths
snake_case_ : int = len(_lowercase )
snake_case_ : Optional[int] = num_heads
snake_case_ : List[str] = kernel_size
snake_case_ : str = mlp_ratio
snake_case_ : str = qkv_bias
snake_case_ : str = hidden_dropout_prob
snake_case_ : Tuple = attention_probs_dropout_prob
snake_case_ : Tuple = drop_path_rate
snake_case_ : Dict = hidden_act
snake_case_ : Union[str, Any] = layer_norm_eps
snake_case_ : Tuple = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case_ : Union[str, Any] = int(embed_dim * 2 ** (len(_lowercase ) - 1) )
snake_case_ : Union[str, Any] = layer_scale_init_value
snake_case_ : Optional[Any] = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(_lowercase ) + 1 )]
snake_case_ , snake_case_ : Any = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names )
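# A worked note on the derivation just above (helper name is illustrative):
# each Nat stage doubles the channel dimension, so with the default
# embed_dim=64 and four stages the final hidden_size is 64 * 2 ** 3 = 512.
def _nat_final_hidden_size(embed_dim: int = 64, num_stages: int = 4) -> int:
    return int(embed_dim * 2 ** (num_stages - 1))  # 512 for the defaults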
| 21 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : int = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : int = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : int = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : List[Any] = tf.cast(math.pi , x.dtype )
snake_case_ : int = tf.cast(0.044_715 , x.dtype )
snake_case_ : Optional[int] = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(__UpperCamelCase , 3 )) ))
return x * cdf
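# A quick standalone check (helper name is illustrative) that the tanh form
# above closely tracks the exact erf-based GELU; the largest gap on this grid
# is well below 1e-3.
def _gelu_tanh_vs_erf_gap():
    x = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0])
    exact = 0.5 * x * (1.0 + tf.math.erf(x / tf.sqrt(2.0)))
    pi = tf.cast(math.pi, x.dtype)
    approx = 0.5 * x * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + 0.044715 * tf.pow(x, 3))))
    return tf.reduce_max(tf.abs(exact - approx))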
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : Optional[Any] = tf.convert_to_tensor(__UpperCamelCase )
return x * tf.tanh(tf.math.softplus(__UpperCamelCase ) )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : int = tf.cast(0.044_715 , x.dtype )
snake_case_ : Optional[int] = tf.cast(0.7_978_845_608 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : Tuple = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : str = tf.cast(1.702 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return tf.clip_by_value(_gelu(__UpperCamelCase ) , -1_0 , 1_0 )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str]=-1 ):
'''simple docstring'''
snake_case_ , snake_case_ : List[Any] = tf.split(__UpperCamelCase , 2 , axis=__UpperCamelCase )
return a * tf.math.sigmoid(__UpperCamelCase )
if version.parse(tf.version.VERSION) >= version.parse('''2.4'''):
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
        return tf.keras.activations.gelu(__UpperCamelCase , approximate=True )
__lowerCAmelCase : int = tf.keras.activations.gelu
__lowerCAmelCase : Optional[Any] = approximate_gelu_wrap
else:
__lowerCAmelCase : List[Any] = _gelu
__lowerCAmelCase : Any = _gelu_new
__lowerCAmelCase : Dict = {
'''gelu''': gelu,
'''gelu_10''': gelu_aa,
'''gelu_fast''': gelu_fast,
'''gelu_new''': gelu_new,
'''glu''': glu,
'''mish''': mish,
'''quick_gelu''': quick_gelu,
'''relu''': tf.keras.activations.relu,
'''sigmoid''': tf.keras.activations.sigmoid,
'''silu''': tf.keras.activations.swish,
'''swish''': tf.keras.activations.swish,
'''tanh''': tf.keras.activations.tanh,
}
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F'function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}' )
| 21 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
import random
from typing import Any
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self ) -> None:
'''simple docstring'''
snake_case_ : list[Any] = []
snake_case_ : int = 0
snake_case_ : int = 0
def UpperCAmelCase__ ( self ) -> bool:
'''simple docstring'''
return self.head == self.tail
def UpperCAmelCase__ ( self , _lowercase ) -> None:
'''simple docstring'''
self.data.append(_lowercase )
snake_case_ : Union[str, Any] = self.tail + 1
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : List[str] = self.data[self.head]
snake_case_ : Any = self.head + 1
return ret
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return self.tail - self.head
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
print(self.data )
print("""**************""" )
print(self.data[self.head : self.tail] )
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowercase ) -> None:
'''simple docstring'''
snake_case_ : Optional[int] = data
snake_case_ : MyNode | None = None
snake_case_ : MyNode | None = None
snake_case_ : int = 1
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
return self.data
def UpperCAmelCase__ ( self ) -> MyNode | None:
'''simple docstring'''
return self.left
def UpperCAmelCase__ ( self ) -> MyNode | None:
'''simple docstring'''
return self.right
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return self.height
def UpperCAmelCase__ ( self , _lowercase ) -> None:
'''simple docstring'''
snake_case_ : List[str] = data
def UpperCAmelCase__ ( self , _lowercase ) -> None:
'''simple docstring'''
snake_case_ : int = node
def UpperCAmelCase__ ( self , _lowercase ) -> None:
'''simple docstring'''
snake_case_ : int = node
def UpperCAmelCase__ ( self , _lowercase ) -> None:
'''simple docstring'''
snake_case_ : Union[str, Any] = height
def __lowerCAmelCase ( __UpperCamelCase : MyNode | None ):
'''simple docstring'''
if node is None:
return 0
return node.get_height()
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
if a > b:
return a
return b
def __lowerCAmelCase ( __UpperCamelCase : MyNode ):
'''simple docstring'''
print("""left rotation node:""" , node.get_data() )
snake_case_ : List[str] = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(__UpperCamelCase )
snake_case_ : List[str] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(__UpperCamelCase )
snake_case_ : Tuple = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(__UpperCamelCase )
return ret
def __lowerCAmelCase ( __UpperCamelCase : MyNode ):
'''simple docstring'''
print("""right rotation node:""" , node.get_data() )
snake_case_ : Tuple = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(__UpperCamelCase )
snake_case_ : List[Any] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(__UpperCamelCase )
snake_case_ : List[Any] = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(__UpperCamelCase )
return ret
def __lowerCAmelCase ( __UpperCamelCase : MyNode ):
'''simple docstring'''
snake_case_ : List[str] = node.get_left()
assert left_child is not None
node.set_left(left_rotation(__UpperCamelCase ) )
return right_rotation(__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : MyNode ):
'''simple docstring'''
snake_case_ : Optional[int] = node.get_right()
assert right_child is not None
node.set_right(right_rotation(__UpperCamelCase ) )
return left_rotation(__UpperCamelCase )
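# The standard AVL rotation table, as a compact standalone sketch (helper name
# is illustrative). balance = height(left) - height(right) at the unbalanced
# node; insert_node below selects the same cases via key comparisons, and
# del_node via child heights.
def _rotation_for(balance: int, child_balance: int) -> str:
    if balance == 2:  # left subtree too tall
        return "right_rotation" if child_balance >= 0 else "lr_rotation"
    if balance == -2:  # right subtree too tall
        return "left_rotation" if child_balance <= 0 else "rl_rotation"
    return "no rotation needed"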
def __lowerCAmelCase ( __UpperCamelCase : MyNode | None , __UpperCamelCase : Any ):
'''simple docstring'''
if node is None:
return MyNode(__UpperCamelCase )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , __UpperCamelCase ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
snake_case_ : int = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
snake_case_ : Optional[Any] = right_rotation(__UpperCamelCase )
else:
snake_case_ : List[str] = lr_rotation(__UpperCamelCase )
else:
node.set_right(insert_node(node.get_right() , __UpperCamelCase ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
snake_case_ : Optional[Any] = node.get_right()
assert right_child is not None
if data < right_child.get_data():
snake_case_ : Optional[Any] = rl_rotation(__UpperCamelCase )
else:
snake_case_ : int = left_rotation(__UpperCamelCase )
snake_case_ : Optional[Any] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(__UpperCamelCase )
return node
def __lowerCAmelCase ( __UpperCamelCase : MyNode ):
'''simple docstring'''
while True:
snake_case_ : Union[str, Any] = root.get_right()
if right_child is None:
break
snake_case_ : List[Any] = right_child
return root.get_data()
def __lowerCAmelCase ( __UpperCamelCase : MyNode ):
'''simple docstring'''
while True:
snake_case_ : List[str] = root.get_left()
if left_child is None:
break
snake_case_ : str = left_child
return root.get_data()
def __lowerCAmelCase ( __UpperCamelCase : MyNode , __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : List[str] = root.get_left()
snake_case_ : List[Any] = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
snake_case_ : Tuple = get_left_most(__UpperCamelCase )
root.set_data(__UpperCamelCase )
root.set_right(del_node(__UpperCamelCase , __UpperCamelCase ) )
elif left_child is not None:
snake_case_ : str = left_child
elif right_child is not None:
snake_case_ : Optional[Any] = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print("""No such data""" )
return root
else:
root.set_left(del_node(__UpperCamelCase , __UpperCamelCase ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(__UpperCamelCase , __UpperCamelCase ) )
if get_height(__UpperCamelCase ) - get_height(__UpperCamelCase ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
snake_case_ : List[Any] = left_rotation(__UpperCamelCase )
else:
snake_case_ : Optional[int] = rl_rotation(__UpperCamelCase )
elif get_height(__UpperCamelCase ) - get_height(__UpperCamelCase ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
snake_case_ : Union[str, Any] = right_rotation(__UpperCamelCase )
else:
snake_case_ : Union[str, Any] = lr_rotation(__UpperCamelCase )
snake_case_ : List[str] = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(__UpperCamelCase )
return root
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self ) -> None:
'''simple docstring'''
snake_case_ : MyNode | None = None
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return get_height(self.root )
def UpperCAmelCase__ ( self , _lowercase ) -> None:
'''simple docstring'''
print("""insert:""" + str(_lowercase ) )
snake_case_ : List[str] = insert_node(self.root , _lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> None:
'''simple docstring'''
print("""delete:""" + str(_lowercase ) )
if self.root is None:
print("""Tree is empty!""" )
return
snake_case_ : str = del_node(self.root , _lowercase )
    def __str__( self , ) -> str:  # a level-order traversal gives a more intuitive look at the tree
'''simple docstring'''
snake_case_ : Dict = """"""
snake_case_ : List[Any] = MyQueue()
q.push(self.root )
snake_case_ : Optional[Any] = self.get_height()
if layer == 0:
return output
snake_case_ : Union[str, Any] = 0
while not q.is_empty():
snake_case_ : str = q.pop()
snake_case_ : Dict = """ """ * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(_lowercase )
q.push(_lowercase )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
snake_case_ : Tuple = cnt + 1
for i in range(1_0_0 ):
if cnt == math.pow(2 , _lowercase ) - 1:
snake_case_ : List[Any] = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def __lowerCAmelCase ( ):
'''simple docstring'''
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
t = AVLtree()
lst = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 21 |
"""simple docstring"""
def longest_distance(graph):
    '''Print the number of vertices on the longest path in the DAG (Kahn's topological order).'''
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)
    print(max(long_dist))
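# Worked expectation for the demo graph below (helper name is illustrative):
# the longest vertex chain is 0 -> 2 -> 5 -> 6 -> 7, and long_dist counts
# vertices starting from 1, so the printed answer is 5.
def _expected_answer_for_demo_graph() -> int:
    return len([0, 2, 5, 6, 7])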
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 21 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''blenderbot-small'''
_lowerCamelCase = ['''past_key_values''']
_lowerCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , _lowercase=5_0_2_6_5 , _lowercase=5_1_2 , _lowercase=8 , _lowercase=2_0_4_8 , _lowercase=1_6 , _lowercase=8 , _lowercase=2_0_4_8 , _lowercase=1_6 , _lowercase=0.0 , _lowercase=0.0 , _lowercase=True , _lowercase=True , _lowercase="gelu" , _lowercase=5_1_2 , _lowercase=0.1 , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=1 , _lowercase=False , _lowercase=0 , _lowercase=1 , _lowercase=2 , _lowercase=2 , **_lowercase , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = vocab_size
snake_case_ : Dict = max_position_embeddings
snake_case_ : Union[str, Any] = d_model
snake_case_ : List[str] = encoder_ffn_dim
snake_case_ : Any = encoder_layers
snake_case_ : Optional[Any] = encoder_attention_heads
snake_case_ : int = decoder_ffn_dim
snake_case_ : List[str] = decoder_layers
snake_case_ : str = decoder_attention_heads
snake_case_ : str = dropout
snake_case_ : List[str] = attention_dropout
snake_case_ : Any = activation_dropout
snake_case_ : Optional[int] = activation_function
snake_case_ : int = init_std
snake_case_ : List[str] = encoder_layerdrop
snake_case_ : Dict = decoder_layerdrop
snake_case_ : List[Any] = use_cache
snake_case_ : Optional[Any] = encoder_layers
snake_case_ : Any = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , is_encoder_decoder=_lowercase , decoder_start_token_id=_lowercase , forced_eos_token_id=_lowercase , **_lowercase , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
snake_case_ : Optional[Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
snake_case_ : int = {0: """batch"""}
snake_case_ : str = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
snake_case_ : Union[str, Any] = {0: """batch""", 1: """decoder_sequence"""}
snake_case_ : Optional[Any] = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(_lowercase , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
snake_case_ : Dict = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
snake_case_ , snake_case_ : int = self.num_layers
for i in range(_lowercase ):
snake_case_ : Any = {0: """batch""", 2: """past_sequence + sequence"""}
snake_case_ : Optional[int] = {0: """batch""", 2: """past_sequence + sequence"""}
else:
snake_case_ : Any = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
snake_case_ : Any = super().outputs
else:
snake_case_ : Optional[int] = super(_lowercase , self ).outputs
if self.use_past:
snake_case_ , snake_case_ : str = self.num_layers
for i in range(_lowercase ):
snake_case_ : str = {0: """batch""", 2: """past_sequence + sequence"""}
snake_case_ : Dict = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def UpperCAmelCase__ ( self , _lowercase , _lowercase = -1 , _lowercase = -1 , _lowercase = False , _lowercase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# Generate decoder inputs
snake_case_ : List[str] = seq_length if not self.use_past else 1
snake_case_ : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
snake_case_ : Optional[Any] = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
snake_case_ : Any = dict(**_lowercase , **_lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
snake_case_ , snake_case_ : Optional[int] = common_inputs["""input_ids"""].shape
snake_case_ : Optional[int] = common_inputs["""decoder_input_ids"""].shape[1]
snake_case_ , snake_case_ : Optional[Any] = self.num_attention_heads
snake_case_ : Optional[Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case_ : Tuple = decoder_seq_length + 3
snake_case_ : int = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
snake_case_ : Optional[Any] = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(_lowercase , _lowercase )] , dim=1 )
snake_case_ : Optional[Any] = []
            # If the numbers of encoder and decoder layers are present in the model configuration, both are considered
snake_case_ , snake_case_ : List[str] = self.num_layers
snake_case_ : List[str] = min(_lowercase , _lowercase )
snake_case_ : Optional[Any] = max(_lowercase , _lowercase ) - min_num_layers
snake_case_ : Optional[Any] = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(_lowercase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowercase ),
torch.zeros(_lowercase ),
torch.zeros(_lowercase ),
torch.zeros(_lowercase ),
) )
# TODO: test this.
snake_case_ : Optional[Any] = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(_lowercase , _lowercase ):
common_inputs["past_key_values"].append((torch.zeros(_lowercase ), torch.zeros(_lowercase )) )
return common_inputs
def UpperCAmelCase__ ( self , _lowercase , _lowercase = -1 , _lowercase = -1 , _lowercase = False , _lowercase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
snake_case_ , snake_case_ : Any = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
snake_case_ : List[Any] = seqlen + 2
snake_case_ , snake_case_ : Any = self.num_layers
snake_case_ , snake_case_ : Any = self.num_attention_heads
snake_case_ : List[str] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case_ : str = common_inputs["""attention_mask"""].dtype
snake_case_ : Tuple = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(_lowercase , _lowercase , dtype=_lowercase )] , dim=1 )
snake_case_ : int = [
(torch.zeros(_lowercase ), torch.zeros(_lowercase )) for _ in range(_lowercase )
]
return common_inputs
def UpperCAmelCase__ ( self , _lowercase , _lowercase = -1 , _lowercase = -1 , _lowercase = False , _lowercase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case_ : Any = compute_effective_axis_dimension(
_lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
snake_case_ : Any = tokenizer.num_special_tokens_to_add(_lowercase )
snake_case_ : Dict = compute_effective_axis_dimension(
_lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowercase )
# Generate dummy inputs according to compute batch and sequence
snake_case_ : Dict = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
snake_case_ : List[Any] = dict(tokenizer(_lowercase , return_tensors=_lowercase ) )
return common_inputs
def UpperCAmelCase__ ( self , _lowercase , _lowercase = -1 , _lowercase = -1 , _lowercase = False , _lowercase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
snake_case_ : str = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
elif self.task == "causal-lm":
snake_case_ : int = self._generate_dummy_inputs_for_causal_lm(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
else:
snake_case_ : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
return common_inputs
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
snake_case_ : Union[str, Any] = super()._flatten_past_key_values_(_lowercase , _lowercase , _lowercase , _lowercase )
else:
snake_case_ : int = super(_lowercase , self )._flatten_past_key_values_(
_lowercase , _lowercase , _lowercase , _lowercase )
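# A standalone sketch (names are illustrative) of the shape of each
# past_key_values tensor assembled above, using the BlenderbotSmall defaults
# (d_model=512, 8 attention heads): (batch, num_heads, past_len, head_dim).
def _past_kv_entry_shape(batch=2, num_heads=8, past_len=5, d_model=512):
    return (batch, num_heads, past_len, d_model // num_heads)  # (2, 8, 5, 64)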
| 21 |
"""simple docstring"""
def binomial_coefficient(n: int, k: int) -> int:
    '''Compute C(n, k) without forming large intermediate factorials.'''
    result = 1  # keeps the running product
    # Since C(n, k) = C(n, n - k), use the smaller k
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result
def catalan_number(node_count: int) -> int:
    '''The n-th Catalan number: C(2n, n) // (n + 1).'''
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)
def factorial(n: int) -> int:
    '''Iterative factorial; raises on negative input.'''
    if n < 0:
        raise ValueError("""factorial() not defined for negative values""")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result
def binary_tree_count(node_count: int) -> int:
    '''Number of labelled binary trees on node_count nodes: Catalan(n) * n!.'''
    return catalan_number(node_count) * factorial(node_count)
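# Worked numbers for node_count = 3 (helper name is illustrative):
# C(6, 3) = 20, so Catalan(3) = 20 // 4 = 5 binary search trees, and
# 5 * 3! = 30 binary trees.
def _counts_for_three_nodes() -> tuple[int, int]:
    return catalan_number(3), binary_tree_count(3)  # (5, 30)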
if __name__ == "__main__":
    node_count = int(input('''Enter the number of nodes: ''').strip() or 0)
if node_count <= 0:
raise ValueError('''We need some nodes to work with.''')
print(
F'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
F'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
| 21 | 1 |
"""simple docstring"""
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    '''Hubble parameter H(z) from the Friedmann equation for a Lambda-CDM universe.'''
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("""All input parameters must be non-negative""")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("""Relative densities cannot be greater than one""")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
    return hubble
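# Sanity check (helper name is illustrative): at redshift 0 every (1 + z)
# factor is 1, and for a flat universe the densities sum to 1, so E(0) = 1
# and H(0) = H0.
def _hubble_today(hubble_constant: float = 68.3) -> float:
    return hubble_parameter(
        hubble_constant=hubble_constant,
        radiation_density=1e-4,
        matter_density=0.3,
        dark_energy=1 - 0.3 - 1e-4,
        redshift=0,
    )  # == 68.3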
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 21 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : Dict = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''swin'''
_lowerCamelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _lowercase=2_2_4 , _lowercase=4 , _lowercase=3 , _lowercase=9_6 , _lowercase=[2, 2, 6, 2] , _lowercase=[3, 6, 1_2, 2_4] , _lowercase=7 , _lowercase=4.0 , _lowercase=True , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.1 , _lowercase="gelu" , _lowercase=False , _lowercase=0.02 , _lowercase=1E-5 , _lowercase=3_2 , _lowercase=None , _lowercase=None , **_lowercase , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : str = image_size
snake_case_ : int = patch_size
snake_case_ : Optional[int] = num_channels
snake_case_ : Union[str, Any] = embed_dim
snake_case_ : Optional[int] = depths
snake_case_ : Optional[int] = len(_lowercase )
snake_case_ : Optional[Any] = num_heads
snake_case_ : Optional[Any] = window_size
snake_case_ : Optional[Any] = mlp_ratio
snake_case_ : Optional[Any] = qkv_bias
snake_case_ : Optional[Any] = hidden_dropout_prob
snake_case_ : Tuple = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = drop_path_rate
snake_case_ : List[Any] = hidden_act
snake_case_ : str = use_absolute_embeddings
snake_case_ : str = layer_norm_eps
snake_case_ : Optional[Any] = initializer_range
snake_case_ : Any = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case_ : Tuple = int(embed_dim * 2 ** (len(_lowercase ) - 1) )
snake_case_ : Tuple = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(_lowercase ) + 1 )]
snake_case_ , snake_case_ : Any = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names )
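# Worked geometry for the defaults above (helper name is illustrative): a
# 224x224 image with patch_size=4 yields a 56x56 patch grid, which
# window_size=7 partitions into an 8x8 grid of local attention windows at the
# first stage.
def _swin_windows_per_side(image_size: int = 224, patch_size: int = 4, window_size: int = 7) -> int:
    return (image_size // patch_size) // window_size  # 8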
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = version.parse('''1.11''' )
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCAmelCase__ ( self ) -> float:
'''simple docstring'''
return 1E-4
| 21 | 1 |
"""simple docstring"""
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Optional[int] = sum(a_i[j] for j in range(__UpperCamelCase , len(__UpperCamelCase ) ) )
snake_case_ : str = sum(a_i[j] * base[j] for j in range(min(len(__UpperCamelCase ) , __UpperCamelCase ) ) )
snake_case_ , snake_case_ : str = 0, 0
snake_case_ : int = n - i
snake_case_ : Any = memo.get(__UpperCamelCase )
if sub_memo is not None:
snake_case_ : Any = sub_memo.get(__UpperCamelCase )
if jumps is not None and len(__UpperCamelCase ) > 0:
# find and make the largest jump without going over
snake_case_ : Optional[int] = -1
for _k in range(len(__UpperCamelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
snake_case_ : Union[str, Any] = _k
break
if max_jump >= 0:
snake_case_ , snake_case_ , snake_case_ : str = jumps[max_jump]
# since the difference between jumps is cached, add c
snake_case_ : List[str] = diff + c
for j in range(min(__UpperCamelCase , len(__UpperCamelCase ) ) ):
snake_case_ , snake_case_ : List[str] = divmod(__UpperCamelCase , 1_0 )
if new_c > 0:
add(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
snake_case_ : Tuple = []
else:
snake_case_ : List[Any] = {c: []}
snake_case_ : Tuple = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
snake_case_ , snake_case_ : Tuple = next_term(__UpperCamelCase , k - 1 , i + dn , __UpperCamelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
snake_case_ , snake_case_ : List[str] = compute(__UpperCamelCase , __UpperCamelCase , i + dn , __UpperCamelCase )
diff += _diff
dn += terms_jumped
snake_case_ : Optional[int] = sub_memo[c]
# keep jumps sorted by # of terms skipped
snake_case_ : Union[str, Any] = 0
while j < len(__UpperCamelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(__UpperCamelCase , (diff, dn, k) )
return (diff, dn)
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Any , __UpperCamelCase : Any ):
'''simple docstring'''
if i >= n:
return 0, i
if k > len(__UpperCamelCase ):
a_i.extend([0 for _ in range(k - len(__UpperCamelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
snake_case_ : str = i
snake_case_ , snake_case_ , snake_case_ : List[str] = 0, 0, 0
for j in range(len(__UpperCamelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
snake_case_ : Union[str, Any] = ds_c + ds_b
diff += addend
snake_case_ : Tuple = 0
for j in range(__UpperCamelCase ):
snake_case_ : Any = a_i[j] + addend
snake_case_ , snake_case_ : Optional[int] = divmod(__UpperCamelCase , 1_0 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return diff, i - start_i
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
for j in range(__UpperCamelCase , len(__UpperCamelCase ) ):
snake_case_ : Tuple = digits[j] + addend
if s >= 1_0:
snake_case_ , snake_case_ : Optional[Any] = divmod(__UpperCamelCase , 1_0 )
snake_case_ : Dict = addend // 1_0 + quotient
else:
snake_case_ : Tuple = s
snake_case_ : Union[str, Any] = addend // 1_0
if addend == 0:
break
while addend > 0:
snake_case_ , snake_case_ : Dict = divmod(__UpperCamelCase , 1_0 )
digits.append(__UpperCamelCase )
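# A naive standalone reference (helper name is illustrative) for the sequence
# the memoised machinery above accelerates: a(1) = 1, a(n+1) = a(n) +
# digitsum(a(n)). The first ten terms are 1, 2, 4, 8, 16, 23, 28, 38, 49, 62.
def _naive_sequence_term(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a  # _naive_sequence_term(10) == 62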
def __lowerCAmelCase ( __UpperCamelCase : int = 1_0**1_5 ):
'''simple docstring'''
snake_case_ : List[str] = [1]
snake_case_ : Any = 1
snake_case_ : str = 0
while True:
snake_case_ , snake_case_ : int = next_term(__UpperCamelCase , 2_0 , i + dn , __UpperCamelCase )
dn += terms_jumped
if dn == n - i:
break
snake_case_ : Tuple = 0
for j in range(len(__UpperCamelCase ) ):
a_n += digits[j] * 1_0**j
return a_n
if __name__ == "__main__":
print(F'''{solution() = }''')
| 21 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : int = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" )
snake_case_ : List[Any] = AutoTokenizer.from_pretrained("""google/mt5-small""" )
snake_case_ : Dict = tokenizer("""Hello there""" , return_tensors="""np""" ).input_ids
snake_case_ : List[str] = tokenizer("""Hi I am""" , return_tensors="""np""" ).input_ids
snake_case_ : Optional[Any] = shift_tokens_right(_lowercase , model.config.pad_token_id , model.config.decoder_start_token_id )
snake_case_ : Tuple = model(_lowercase , decoder_input_ids=_lowercase ).logits
snake_case_ : Tuple = optax.softmax_cross_entropy(_lowercase , onehot(_lowercase , logits.shape[-1] ) ).mean()
snake_case_ : List[str] = -(labels.shape[-1] * loss.item())
snake_case_ : Optional[int] = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
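# A standalone note (helper name is illustrative) on the score computed above:
# the mean per-token cross-entropy is converted back into a total sequence
# log-likelihood via sum(log p(token)) = -(num_tokens * mean_loss).
def _sequence_log_likelihood(mean_loss: float, num_tokens: int) -> float:
    return -(num_tokens * mean_loss)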
| 21 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , *_lowercase , **_lowercase ) -> None:
'''simple docstring'''
warnings.warn(
"""The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use MobileViTImageProcessor instead.""" , _lowercase , )
super().__init__(*_lowercase , **_lowercase )
| 21 |
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 21 | 1 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCAmelCase : Any = {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json''',
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''mvp'''
_lowerCamelCase = ['''past_key_values''']
_lowerCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , _lowercase=5_0_2_6_7 , _lowercase=1_0_2_4 , _lowercase=1_2 , _lowercase=4_0_9_6 , _lowercase=1_6 , _lowercase=1_2 , _lowercase=4_0_9_6 , _lowercase=1_6 , _lowercase=0.0 , _lowercase=0.0 , _lowercase="gelu" , _lowercase=1_0_2_4 , _lowercase=0.1 , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=0.0 , _lowercase=False , _lowercase=True , _lowercase=1 , _lowercase=0 , _lowercase=2 , _lowercase=True , _lowercase=2 , _lowercase=2 , _lowercase=False , _lowercase=1_0_0 , _lowercase=8_0_0 , **_lowercase , ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : str = vocab_size
snake_case_ : int = max_position_embeddings
snake_case_ : Tuple = d_model
snake_case_ : Union[str, Any] = encoder_ffn_dim
snake_case_ : Optional[Any] = encoder_layers
snake_case_ : Dict = encoder_attention_heads
snake_case_ : Optional[Any] = decoder_ffn_dim
snake_case_ : Optional[Any] = decoder_layers
snake_case_ : Union[str, Any] = decoder_attention_heads
snake_case_ : List[str] = dropout
snake_case_ : Any = attention_dropout
snake_case_ : List[str] = activation_dropout
snake_case_ : List[str] = activation_function
snake_case_ : Union[str, Any] = init_std
snake_case_ : Tuple = encoder_layerdrop
snake_case_ : str = decoder_layerdrop
snake_case_ : int = classifier_dropout
snake_case_ : Tuple = use_cache
snake_case_ : str = encoder_layers
snake_case_ : str = scale_embedding # scale factor will be sqrt(d_model) if True
snake_case_ : int = use_prompt
snake_case_ : Any = prompt_length
snake_case_ : int = prompt_mid_dim
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , is_encoder_decoder=_lowercase , decoder_start_token_id=_lowercase , forced_eos_token_id=_lowercase , **_lowercase , )
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , _lowercase ):
snake_case_ : Any = self.bos_token_id
warnings.warn(
f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
"""The config can simply be saved and uploaded again to be fixed.""" )
| 21 |
"""simple docstring"""
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__lowerCAmelCase : List[str] = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
__lowerCAmelCase : Optional[Any] = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
__lowerCAmelCase : str = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Any ):
'''simple docstring'''
return float((preds == labels).mean() )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : str="binary" ):
'''simple docstring'''
snake_case_ : Optional[Any] = simple_accuracy(__UpperCamelCase , __UpperCamelCase )
snake_case_ : Dict = float(fa_score(y_true=__UpperCamelCase , y_pred=__UpperCamelCase , average=__UpperCamelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : List[Any] = {}
for id_pred, label in zip(__UpperCamelCase , __UpperCamelCase ):
snake_case_ : Optional[int] = F'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
snake_case_ : Union[str, Any] = id_pred["""prediction"""]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
snake_case_ : str = [(pred, label)]
snake_case_ , snake_case_ : List[str] = [], []
for question, preds_labels in question_map.items():
snake_case_ , snake_case_ : Optional[Any] = zip(*__UpperCamelCase )
snake_case_ : int = fa_score(y_true=__UpperCamelCase , y_pred=__UpperCamelCase , average="""macro""" )
fas.append(__UpperCamelCase )
snake_case_ : Dict = int(sum(pred == label for pred, label in preds_labels ) == len(__UpperCamelCase ) )
ems.append(__UpperCamelCase )
snake_case_ : Optional[int] = float(sum(__UpperCamelCase ) / len(__UpperCamelCase ) )
snake_case_ : Any = sum(__UpperCamelCase ) / len(__UpperCamelCase )
snake_case_ : int = float(fa_score(y_true=__UpperCamelCase , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> List[str]:
'''simple docstring'''
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_lowercase , _lowercase )}
elif self.config_name == "cb":
return acc_and_fa(_lowercase , _lowercase , fa_avg="""macro""" )
elif self.config_name == "record":
snake_case_ : Optional[Any] = [
{
"""qas""": [
{"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
for ref in references
]
}
]
snake_case_ : Dict = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
return evaluate_record(_lowercase , _lowercase )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_lowercase , _lowercase )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_lowercase , _lowercase )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 21 | 1 |
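# A minimal, readable sketch of the MultiRC aggregation implemented above, with
# descriptive names in place of the obfuscated ones. It assumes scikit-learn is
# installed; the grouping key and the metrics mirror the function body, they are
# not a new API.
from sklearn.metrics import f1_score

def evaluate_multirc_sketch(ids_preds, labels):
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        key = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        question_map.setdefault(key, []).append((id_pred["prediction"], label))
    f1s, ems = [], []
    for preds_labels in question_map.values():
        preds, golds = zip(*preds_labels)
        f1s.append(f1_score(y_true=golds, y_pred=preds, average="macro"))
        ems.append(int(all(p == g for p, g in preds_labels)))
    f1_a = float(f1_score(y_true=labels, y_pred=[p["prediction"] for p in ids_preds]))
    return {"exact_match": sum(ems) / len(ems), "f1_m": sum(f1s) / len(f1s), "f1_a": f1_a}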
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__lowerCAmelCase : Union[str, Any] = logging.getLogger(__name__)
__lowerCAmelCase : Optional[int] = tf.data.AUTOTUNE
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[str] = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""" , type=__UpperCamelCase , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , )
parser.add_argument(
"""--tokenizer""" , type=__UpperCamelCase , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , )
parser.add_argument(
"""--per_replica_batch_size""" , type=__UpperCamelCase , default=8 , help="""Batch size per TPU core.""" , )
parser.add_argument(
"""--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , )
parser.add_argument(
"""--tpu_name""" , type=__UpperCamelCase , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , )
parser.add_argument(
"""--tpu_zone""" , type=__UpperCamelCase , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , )
parser.add_argument(
"""--gcp_project""" , type=__UpperCamelCase , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , )
parser.add_argument(
"""--train_dataset""" , type=__UpperCamelCase , help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--shuffle_buffer_size""" , type=__UpperCamelCase , default=2**1_8 , help="""Size of the shuffle buffer (in samples)""" , )
parser.add_argument(
"""--eval_dataset""" , type=__UpperCamelCase , help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--num_epochs""" , type=__UpperCamelCase , default=1 , help="""Number of epochs to train for.""" , )
parser.add_argument(
"""--learning_rate""" , type=__UpperCamelCase , default=1E-4 , help="""Learning rate to use for training.""" , )
parser.add_argument(
"""--weight_decay_rate""" , type=__UpperCamelCase , default=1E-3 , help="""Weight decay rate to use for training.""" , )
parser.add_argument(
"""--max_length""" , type=__UpperCamelCase , default=5_1_2 , help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""" , )
parser.add_argument(
"""--mlm_probability""" , type=__UpperCamelCase , default=0.15 , help="""Fraction of tokens to mask during training.""" , )
parser.add_argument("""--output_dir""" , type=__UpperCamelCase , required=__UpperCamelCase , help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""" , type=__UpperCamelCase , help="""Model ID to upload to on the Hugging Face Hub.""" )
snake_case_ : List[Any] = parser.parse_args()
return args
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
try:
if args.tpu_name:
snake_case_ : Optional[int] = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
snake_case_ : Dict = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"""Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
"""--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
tf.config.experimental_connect_to_cluster(__UpperCamelCase )
tf.tpu.experimental.initialize_tpu_system(__UpperCamelCase )
return tpu
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Optional[int] = 0
for file in file_list:
snake_case_ : List[str] = file.split("""/""" )[-1]
snake_case_ : Tuple = re.search(r"""-\d+-(\d+)\.tfrecord""" , __UpperCamelCase ).group(1 )
snake_case_ : int = int(__UpperCamelCase )
num_samples += sample_count
return num_samples
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any=None ):
'''simple docstring'''
snake_case_ : Tuple = count_samples(__UpperCamelCase )
snake_case_ : List[Any] = tf.data.Dataset.from_tensor_slices(__UpperCamelCase )
if shuffle:
snake_case_ : Union[str, Any] = dataset.shuffle(len(__UpperCamelCase ) )
snake_case_ : Dict = tf.data.TFRecordDataset(__UpperCamelCase , num_parallel_reads=__UpperCamelCase )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
snake_case_ : List[str] = dataset.apply(tf.data.experimental.assert_cardinality(__UpperCamelCase ) )
snake_case_ : int = dataset.map(__UpperCamelCase , num_parallel_calls=__UpperCamelCase )
if shuffle:
assert shuffle_buffer_size is not None
snake_case_ : str = dataset.shuffle(args.shuffle_buffer_size )
snake_case_ : Optional[int] = dataset.batch(__UpperCamelCase , drop_remainder=__UpperCamelCase )
snake_case_ : Union[str, Any] = dataset.map(__UpperCamelCase , num_parallel_calls=__UpperCamelCase )
snake_case_ : Dict = dataset.prefetch(__UpperCamelCase )
return dataset
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
if not args.no_tpu:
snake_case_ : List[Any] = initialize_tpu(__UpperCamelCase )
snake_case_ : Dict = tf.distribute.TPUStrategy(__UpperCamelCase )
else:
snake_case_ : Union[str, Any] = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
snake_case_ : Union[str, Any] = AutoTokenizer.from_pretrained(args.tokenizer )
snake_case_ : Optional[int] = AutoConfig.from_pretrained(args.pretrained_model_config )
snake_case_ : int = tokenizer.vocab_size
snake_case_ : List[str] = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) )
if not training_records:
raise ValueError(F'No .tfrecord files found in {args.train_dataset}.' )
snake_case_ : Tuple = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) )
if not eval_records:
raise ValueError(F'No .tfrecord files found in {args.eval_dataset}.' )
snake_case_ : Optional[int] = count_samples(__UpperCamelCase )
snake_case_ : Optional[int] = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
snake_case_ : Dict = steps_per_epoch * args.num_epochs
with strategy.scope():
snake_case_ : List[str] = TFAutoModelForMaskedLM.from_config(__UpperCamelCase )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
snake_case_ , snake_case_ : Union[str, Any] = create_optimizer(
num_train_steps=__UpperCamelCase , num_warmup_steps=total_train_steps // 2_0 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=__UpperCamelCase , metrics=["""accuracy"""] )
def decode_fn(__UpperCamelCase : Union[str, Any] ):
snake_case_ : Tuple = {
"""input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
"""attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(__UpperCamelCase , __UpperCamelCase )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
snake_case_ : int = DataCollatorForLanguageModeling(
tokenizer=__UpperCamelCase , mlm_probability=args.mlm_probability , mlm=__UpperCamelCase , return_tensors="""tf""" )
def mask_with_collator(__UpperCamelCase : Tuple ):
# TF really needs an isin() function
snake_case_ : Optional[Any] = (
~tf.cast(batch["""attention_mask"""] , tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
snake_case_ , snake_case_ : int = data_collator.tf_mask_tokens(
batch["""input_ids"""] , vocab_size=len(__UpperCamelCase ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=__UpperCamelCase , )
return batch
snake_case_ : Optional[int] = args.per_replica_batch_size * strategy.num_replicas_in_sync
snake_case_ : Any = prepare_dataset(
__UpperCamelCase , decode_fn=__UpperCamelCase , mask_fn=__UpperCamelCase , batch_size=__UpperCamelCase , shuffle=__UpperCamelCase , shuffle_buffer_size=args.shuffle_buffer_size , )
snake_case_ : List[Any] = prepare_dataset(
__UpperCamelCase , decode_fn=__UpperCamelCase , mask_fn=__UpperCamelCase , batch_size=__UpperCamelCase , shuffle=__UpperCamelCase , )
snake_case_ : List[str] = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=__UpperCamelCase ) )
model.fit(
__UpperCamelCase , validation_data=__UpperCamelCase , epochs=args.num_epochs , callbacks=__UpperCamelCase , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
__lowerCAmelCase : int = parse_args()
main(args)
| 21 |
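# A self-contained illustration of the shard-counting logic in count_samples
# above. The file names are hypothetical; the assumption is that each shard is
# named <prefix>-<shard_idx>-<num_samples>.tfrecord, as produced by the
# companion prepare_tfrecord_shards.py script mentioned in the --max_length help.
import re

def count_samples_sketch(file_list):
    total = 0
    for path in file_list:
        filename = path.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        total += int(sample_count)
    return total

print(count_samples_sketch(["gs://bucket/wikitext-0-25000.tfrecord",
                            "gs://bucket/wikitext-1-25000.tfrecord"]))  # 50000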
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=9_9 , _lowercase=3_2 , _lowercase=5 , _lowercase=4 , _lowercase=3_7 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=1_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=4 , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = parent
snake_case_ : Dict = batch_size
snake_case_ : Any = seq_length
snake_case_ : Tuple = is_training
snake_case_ : Dict = use_attention_mask
snake_case_ : int = use_token_type_ids
snake_case_ : List[Any] = use_labels
snake_case_ : List[str] = vocab_size
snake_case_ : str = hidden_size
snake_case_ : Optional[Any] = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : Any = intermediate_size
snake_case_ : Optional[Any] = hidden_act
snake_case_ : Tuple = hidden_dropout_prob
snake_case_ : List[Any] = attention_probs_dropout_prob
snake_case_ : int = max_position_embeddings
snake_case_ : Dict = type_vocab_size
snake_case_ : Dict = type_sequence_label_size
snake_case_ : Optional[Any] = initializer_range
snake_case_ : Tuple = num_choices
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Tuple = None
if self.use_attention_mask:
snake_case_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : List[str] = None
if self.use_token_type_ids:
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Tuple = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Any = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = config_and_inputs
snake_case_ : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Tuple = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = config_and_inputs
snake_case_ : Union[str, Any] = True
snake_case_ : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
snake_case_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = True
_lowerCamelCase = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Any = FlaxRobertaPreLayerNormModelTester(self )
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case_ : Tuple = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : List[str] = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
snake_case_ : str = model(_lowercase )[0]
snake_case_ : int = [1, 1_1, 5_0_2_6_5]
self.assertEqual(list(output.shape ) , _lowercase )
# compare the actual values for a slice.
snake_case_ : Tuple = np.array(
[[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Any = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : Dict = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
snake_case_ : Any = model(_lowercase )[0]
# compare the actual values for a slice.
snake_case_ : Optional[Any] = np.array(
[[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
| 21 | 1 |
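# The integration tests above pin model outputs to small reference slices. A
# tiny sketch of that pattern with synthetic arrays; only the atol tolerance is
# taken from the tests, the numbers here are made up.
import numpy as np

output = np.array([[[0.0208, -0.0356, 0.0237]]])
expected_slice = np.array([[[0.0208, -0.0356, 0.0238]]])
assert np.allclose(output[:, :3, :3], expected_slice, atol=1e-4)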
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
__lowerCAmelCase : int = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
__lowerCAmelCase : List[str] = parser.parse_args()
if args.model_type == "bert":
__lowerCAmelCase : Tuple = BertForMaskedLM.from_pretrained(args.model_name)
__lowerCAmelCase : Optional[Any] = '''bert'''
else:
raise ValueError('''args.model_type should be "bert".''')
__lowerCAmelCase : List[str] = model.state_dict()
__lowerCAmelCase : Any = {}
for w in ["word_embeddings", "position_embeddings"]:
__lowerCAmelCase : List[Any] = state_dict[F'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
__lowerCAmelCase : Any = state_dict[F'''{prefix}.embeddings.LayerNorm.{w}''']
__lowerCAmelCase : Dict = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
__lowerCAmelCase : int = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
__lowerCAmelCase : List[str] = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
__lowerCAmelCase : Optional[Any] = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
__lowerCAmelCase : str = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
__lowerCAmelCase : Tuple = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
__lowerCAmelCase : Optional[Any] = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
__lowerCAmelCase : Tuple = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
__lowerCAmelCase : Any = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
__lowerCAmelCase : List[str] = state_dict['''cls.predictions.decoder.weight''']
__lowerCAmelCase : Tuple = state_dict['''cls.predictions.bias''']
if args.vocab_transform:
for w in ["weight", "bias"]:
__lowerCAmelCase : List[Any] = state_dict[F'''cls.predictions.transform.dense.{w}''']
__lowerCAmelCase : List[str] = state_dict[F'''cls.predictions.transform.LayerNorm.{w}''']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
| 21 |
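# A toy illustration of the layer-selection scheme above: teacher layers
# 0, 2, 4, 7, 9, 11 are copied into consecutive student slots. Plain dicts stand
# in for the real state_dicts, and the student key name is hypothetical; only
# the index remapping is taken from the script.
teacher_sd = {f"bert.encoder.layer.{i}.output.dense.weight": i for i in range(12)}
student_sd = {}
std_idx = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
    student_key = f"student.layer.{std_idx}.output.dense.weight"  # hypothetical name
    student_sd[student_key] = teacher_sd[f"bert.encoder.layer.{teacher_idx}.output.dense.weight"]
    std_idx += 1
print(len(student_sd))  # 6 student layers drawn from a 12-layer teacher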
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
__lowerCAmelCase : Optional[Any] = parser.parse_args()
__lowerCAmelCase : Dict = '''cpu'''
__lowerCAmelCase : Optional[Any] = '''a lovely <dicoo> in red dress and hat, in the snowy and bright night, with many brightly lit buildings'''
__lowerCAmelCase : Tuple = '''path-to-your-trained-model'''
__lowerCAmelCase : List[Any] = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
__lowerCAmelCase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
__lowerCAmelCase : List[Any] = pipe.to(device)
# to channels last
__lowerCAmelCase : Optional[Any] = pipe.unet.to(memory_format=torch.channels_last)
__lowerCAmelCase : List[str] = pipe.vae.to(memory_format=torch.channels_last)
__lowerCAmelCase : Optional[Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
__lowerCAmelCase : Dict = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
__lowerCAmelCase : Tuple = torch.randn(2, 4, 64, 64)
__lowerCAmelCase : Any = torch.rand(1) * 999
__lowerCAmelCase : List[str] = torch.randn(2, 77, 768)
__lowerCAmelCase : Optional[int] = (sample, timestep, encoder_hidden_status)
try:
__lowerCAmelCase : List[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
__lowerCAmelCase : Optional[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
__lowerCAmelCase : Any = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
__lowerCAmelCase : int = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
__lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
__lowerCAmelCase : List[str] = 666
__lowerCAmelCase : Optional[int] = torch.Generator(device).manual_seed(seed)
__lowerCAmelCase : List[Any] = {'''generator''': generator}
if args.steps is not None:
__lowerCAmelCase : Any = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
__lowerCAmelCase : str = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
| 21 | 1 |
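# The two CPU optimizations the script applies, shown in isolation: channels-last
# memory format plus bfloat16 autocast. A generic sketch that assumes only a
# working torch install, not the diffusers pipeline itself.
import torch
import torch.nn as nn

conv = nn.Conv2d(4, 8, kernel_size=3, padding=1).to(memory_format=torch.channels_last)
x = torch.randn(1, 4, 64, 64).to(memory_format=torch.channels_last)
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    y = conv(x)
print(y.dtype)  # torch.bfloat16 under autocast
print(y.is_contiguous(memory_format=torch.channels_last))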
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def __lowerCAmelCase ( __UpperCamelCase : list , __UpperCamelCase : list , __UpperCamelCase : list , __UpperCamelCase : list , __UpperCamelCase : list ):
'''simple docstring'''
snake_case_ : Dict = np.array([[1, item, train_mtch[i]] for i, item in enumerate(__UpperCamelCase )] )
snake_case_ : Dict = np.array(__UpperCamelCase )
snake_case_ : Any = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , __UpperCamelCase ) ) , x.transpose() ) , __UpperCamelCase )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def __lowerCAmelCase ( __UpperCamelCase : list , __UpperCamelCase : list , __UpperCamelCase : list ):
'''simple docstring'''
snake_case_ : Union[str, Any] = (1, 2, 1)
snake_case_ : Optional[Any] = (1, 1, 0, 7)
snake_case_ : Tuple = SARIMAX(
__UpperCamelCase , exog=__UpperCamelCase , order=__UpperCamelCase , seasonal_order=__UpperCamelCase )
snake_case_ : List[str] = model.fit(disp=__UpperCamelCase , maxiter=6_0_0 , method="""nm""" )
snake_case_ : Union[str, Any] = model_fit.predict(1 , len(__UpperCamelCase ) , exog=[test_match] )
return result[0]
def __lowerCAmelCase ( __UpperCamelCase : list , __UpperCamelCase : list , __UpperCamelCase : list ):
'''simple docstring'''
snake_case_ : str = SVR(kernel="""rbf""" , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(__UpperCamelCase , __UpperCamelCase )
snake_case_ : Tuple = regressor.predict(__UpperCamelCase )
return y_pred[0]
def __lowerCAmelCase ( __UpperCamelCase : list ):
'''simple docstring'''
train_user.sort()
snake_case_ : Optional[int] = np.percentile(__UpperCamelCase , 2_5 )
snake_case_ : List[str] = np.percentile(__UpperCamelCase , 7_5 )
snake_case_ : List[str] = qa - qa
snake_case_ : Optional[int] = qa - (iqr * 0.1)
return low_lim
def __lowerCAmelCase ( __UpperCamelCase : list , __UpperCamelCase : float ):
'''simple docstring'''
snake_case_ : Any = 0
snake_case_ : str = 0
for i in list_vote:
if i > actual_result:
snake_case_ : Tuple = not_safe + 1
else:
if abs(abs(__UpperCamelCase ) - abs(__UpperCamelCase ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
__lowerCAmelCase : Optional[int] = [[1_8231, 0.0, 1], [2_2621, 1.0, 2], [1_5675, 0.0, 3], [2_3583, 1.0, 4]]
__lowerCAmelCase : Any = pd.DataFrame(
data_input, columns=['''total_user''', '''total_even''', '''days''']
)
__lowerCAmelCase : Tuple = Normalizer().fit_transform(data_input_df.values)
# split data
__lowerCAmelCase : str = normalize_df[:, 2].tolist()
__lowerCAmelCase : Union[str, Any] = normalize_df[:, 0].tolist()
__lowerCAmelCase : Tuple = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
__lowerCAmelCase : Tuple = normalize_df[:, [1, 2]].tolist()
__lowerCAmelCase : Union[str, Any] = x[: len(x) - 1]
__lowerCAmelCase : Union[str, Any] = x[len(x) - 1 :]
# for linear regression & sarimax
__lowerCAmelCase : Any = total_date[: len(total_date) - 1]
__lowerCAmelCase : Optional[Any] = total_user[: len(total_user) - 1]
__lowerCAmelCase : int = total_match[: len(total_match) - 1]
__lowerCAmelCase : List[Any] = total_date[len(total_date) - 1 :]
__lowerCAmelCase : str = total_user[len(total_user) - 1 :]
__lowerCAmelCase : List[str] = total_match[len(total_match) - 1 :]
# voting system with forecasting
__lowerCAmelCase : Optional[Any] = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
__lowerCAmelCase : Dict = '''''' if data_safety_checker(res_vote, tst_user) else '''not '''
    print(F'''Today\'s data is {not_str}safe.''')
| 21 |
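# The voting rule in data_safety_checker, restated with concrete numbers: a
# forecast only counts as safe when it does not exceed the actual value and
# lands within 0.1 of it.
def is_safe(list_vote, actual_result):
    safe = not_safe = 0
    for vote in list_vote:
        if vote > actual_result:
            not_safe += 1
        elif abs(abs(vote) - abs(actual_result)) <= 0.1:
            safe += 1
        else:
            not_safe += 1
    return safe > not_safe

print(is_safe([0.52, 0.48, 0.61], actual_result=0.55))  # True: two of three votes are safe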
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = RoFormerTokenizer
_lowerCamelCase = RoFormerTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = True
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().setUp()
def UpperCAmelCase__ ( self , **_lowercase ) -> str:
'''simple docstring'''
return self.tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **_lowercase )
def UpperCAmelCase__ ( self , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **_lowercase )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : Tuple = """永和服装饰品有限公司,今天天气非常好"""
snake_case_ : int = """永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"""
return input_text, output_text
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : List[str] = self.get_tokenizer()
snake_case_ , snake_case_ : Optional[Any] = self.get_chinese_input_output_texts()
snake_case_ : List[str] = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , output_text.split() )
snake_case_ : str = tokens + [tokenizer.unk_token]
snake_case_ : Tuple = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : int = self.get_rust_tokenizer()
snake_case_ , snake_case_ : List[Any] = self.get_chinese_input_output_texts()
snake_case_ : Union[str, Any] = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , output_text.split() )
snake_case_ : Optional[int] = tokens + [tokenizer.unk_token]
snake_case_ : Union[str, Any] = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
| 21 | 1 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : List[str] = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = XGLMTokenizer
_lowerCamelCase = XGLMTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = True
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ : Any = XGLMTokenizer(_lowercase , keep_accents=_lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Dict = """<pad>"""
snake_case_ : List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(len(_lowercase ) , 1_0_0_8 )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_8 )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : List[Any] = XGLMTokenizer(_lowercase , keep_accents=_lowercase )
snake_case_ : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_lowercase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
snake_case_ : str = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
snake_case_ : List[str] = tokenizer.convert_tokens_to_ids(_lowercase )
self.assertListEqual(
_lowercase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
snake_case_ : Tuple = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(_lowercase , f.name )
snake_case_ : List[str] = XGLMTokenizer(f.name , keep_accents=_lowercase )
snake_case_ : int = pickle.dumps(_lowercase )
pickle.loads(_lowercase )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
snake_case_ : Tuple = self.get_tokenizer()
snake_case_ : Optional[int] = self.get_rust_tokenizer()
snake_case_ : Union[str, Any] = """I was born in 92000, and this is falsé."""
snake_case_ : Tuple = tokenizer.tokenize(_lowercase )
snake_case_ : Tuple = rust_tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
snake_case_ : Dict = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
snake_case_ : Union[str, Any] = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
snake_case_ : Dict = self.get_rust_tokenizer()
snake_case_ : Dict = tokenizer.encode(_lowercase )
snake_case_ : Optional[int] = rust_tokenizer.encode(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
@slow
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = """Hello World!"""
snake_case_ : List[Any] = [2, 3_1_2_2_7, 4_4_4_7, 3_5]
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase ) )
@slow
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
snake_case_ : Optional[Any] = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5]
# fmt: on
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase ) )
@slow
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
        # fmt: off
        snake_case_ : Any = {
"""input_ids""": [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name="""facebook/xglm-564M""" , padding=_lowercase , )
| 21 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 1_0 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
snake_case_ : List[Any] = [
2_0_4_7,
1_3_7_3_6_5_3,
2_5_3_2_6_0_0_1,
3_2_1_5_0_3_1_7_5_1,
2_1_5_2_3_0_2_8_9_8_7_4_7,
3_4_7_4_7_4_9_6_6_0_3_8_3,
3_4_1_5_5_0_0_7_1_7_2_8_3_2_1,
1,
3_8_2_5_1_2_3_0_5_6_5_4_6_4_1_3_0_5_1,
1,
1,
3_1_8_6_6_5_8_5_7_8_3_4_0_3_1_1_5_1_1_6_7_4_6_1,
3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1,
]
snake_case_ : Dict = [2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9, 3_1, 3_7, 4_1]
for idx, _p in enumerate(__UpperCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
snake_case_ : Optional[int] = primes[:idx]
break
snake_case_ , snake_case_ : Tuple = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
snake_case_ : List[str] = False
for r in range(__UpperCamelCase ):
snake_case_ : int = pow(__UpperCamelCase , d * 2**r , __UpperCamelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
snake_case_ : Optional[Any] = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def __lowerCAmelCase ( ):
'''simple docstring'''
assert not miller_rabin(5_6_1 )
assert miller_rabin(5_6_3 )
# 2047
assert not miller_rabin(8_3_8_2_0_1 )
assert miller_rabin(8_3_8_2_0_7 )
# 1_373_653
assert not miller_rabin(1_7_3_1_6_0_0_1 )
assert miller_rabin(1_7_3_1_6_0_1_7 )
# 25_326_001
assert not miller_rabin(3_0_7_8_3_8_6_6_4_1 )
assert miller_rabin(3_0_7_8_3_8_6_6_5_3 )
# 3_215_031_751
assert not miller_rabin(1_7_1_3_0_4_5_5_7_4_8_0_1 )
assert miller_rabin(1_7_1_3_0_4_5_5_7_4_8_1_9 )
# 2_152_302_898_747
assert not miller_rabin(2_7_7_9_7_9_9_7_2_8_3_0_7 )
assert miller_rabin(2_7_7_9_7_9_9_7_2_8_3_2_7 )
# 3_474_749_660_383
assert not miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_4_4_1 )
assert miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_5_2_7 )
# 341_550_071_728_321
assert not miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_5_1 )
assert miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_9_1 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_8_6_7 )
assert miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_9_5_1 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_3_3 )
assert miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_5_9 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 21 | 1 |
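# The core of the witness loop above, isolated: write n - 1 as d * 2**s with d
# odd, then test a single base a. Pure Python, runnable as-is.
def single_witness_passes(n, a):
    d, s = n - 1, 0
    while d % 2 == 0:
        d //= 2
        s += 1
    x = pow(a, d, n)
    if x in (1, n - 1):
        return True  # base a does not expose n as composite
    for _ in range(s - 1):
        x = pow(x, 2, n)
        if x == n - 1:
            return True
    return False  # base a proves n composite

print(single_witness_passes(561, 2))  # False: the Carmichael number 561 is caught
print(single_witness_passes(563, 2))  # True: 563 is prime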
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = {
'''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''nllb-moe'''
_lowerCamelCase = ['''past_key_values''']
_lowerCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , _lowercase=1_2_8_1_1_2 , _lowercase=1_0_2_4 , _lowercase=1_2 , _lowercase=4_0_9_6 , _lowercase=1_6 , _lowercase=1_2 , _lowercase=4_0_9_6 , _lowercase=1_6 , _lowercase=0.05 , _lowercase=0.05 , _lowercase=True , _lowercase=True , _lowercase="relu" , _lowercase=1_0_2_4 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=2 , _lowercase=True , _lowercase=False , _lowercase="float32" , _lowercase=False , _lowercase=1_2_8 , _lowercase=6_4 , _lowercase=4 , _lowercase=4 , _lowercase=0.001 , _lowercase=0.001 , _lowercase="all" , _lowercase=False , _lowercase=False , _lowercase=1.0 , _lowercase=0.2 , _lowercase=1 , _lowercase=0 , _lowercase=2 , _lowercase=False , **_lowercase , ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = vocab_size
snake_case_ : Any = max_position_embeddings
snake_case_ : str = d_model
snake_case_ : List[Any] = encoder_ffn_dim
snake_case_ : Optional[int] = encoder_layers
snake_case_ : Union[str, Any] = encoder_attention_heads
snake_case_ : List[Any] = decoder_ffn_dim
snake_case_ : Optional[Any] = decoder_layers
snake_case_ : Optional[Any] = decoder_attention_heads
snake_case_ : List[str] = dropout
snake_case_ : int = attention_dropout
snake_case_ : int = activation_dropout
snake_case_ : List[str] = activation_function
snake_case_ : Any = init_std
snake_case_ : str = encoder_layerdrop
snake_case_ : Optional[int] = decoder_layerdrop
snake_case_ : Optional[int] = use_cache
snake_case_ : Tuple = encoder_layers
snake_case_ : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
snake_case_ : int = router_z_loss_coef
snake_case_ : Optional[int] = router_aux_loss_coef
snake_case_ : Union[str, Any] = decoder_sparse_step
snake_case_ : Any = encoder_sparse_step
snake_case_ : Optional[Any] = num_experts
snake_case_ : Union[str, Any] = expert_capacity
snake_case_ : Union[str, Any] = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}' )
snake_case_ : Optional[int] = router_dtype
snake_case_ : Optional[Any] = router_ignore_padding_tokens
snake_case_ : Union[str, Any] = batch_prioritized_routing
snake_case_ : List[str] = second_expert_policy
snake_case_ : Optional[Any] = normalize_router_prob_before_dropping
snake_case_ : List[Any] = moe_eval_capacity_token_fraction
snake_case_ : List[Any] = moe_token_dropout
snake_case_ : Tuple = output_router_logits
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , is_encoder_decoder=_lowercase , decoder_start_token_id=_lowercase , **_lowercase , )
| 21 |
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
def is_in_circle(__UpperCamelCase : float , __UpperCamelCase : float ) -> bool:
snake_case_ : Dict = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
snake_case_ : Tuple = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(__UpperCamelCase ) )
# The ratio of the area for circle to square is pi/4.
snake_case_ : Union[str, Any] = proportion * 4
print(F'The estimated value of pi is {pi_estimate}' )
print(F'The numpy value of pi is {pi}' )
print(F'The total error is {abs(pi - pi_estimate )}' )
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Callable[[float], float] , __UpperCamelCase : float = 0.0 , __UpperCamelCase : float = 1.0 , ):
'''simple docstring'''
return mean(
function_to_integrate(uniform(__UpperCamelCase , __UpperCamelCase ) ) for _ in range(__UpperCamelCase ) ) * (max_value - min_value)
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : float = 0.0 , __UpperCamelCase : float = 1.0 ):
'''simple docstring'''
def identity_function(__UpperCamelCase : float ) -> float:
return x
snake_case_ : int = area_under_curve_estimator(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ : str = (max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {expected_value}' )
print(F'Total error is {abs(estimated_value - expected_value )}' )
print("""******************""" )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
def function_to_integrate(__UpperCamelCase : float ) -> float:
return sqrt(4.0 - x * x )
snake_case_ : List[Any] = area_under_curve_estimator(
__UpperCamelCase , __UpperCamelCase , 0.0 , 2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {pi}' )
print(F'Total error is {abs(estimated_value - pi )}' )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 1 |
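# The same estimator pattern as area_under_curve_estimator, applied to a
# function that is not part of the file: mean(f(U)) * (b - a) approximates the
# integral of f over [a, b] for U drawn uniformly from [a, b].
from math import exp
from random import uniform
from statistics import mean

def integral_estimate(f, a, b, iterations=100_000):
    return mean(f(uniform(a, b)) for _ in range(iterations)) * (b - a)

print(integral_estimate(exp, 0.0, 1.0))  # converges to e - 1 ≈ 1.71828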
"""simple docstring"""
import math
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] ):
'''simple docstring'''
if 0 not in (x, y):
        # We use the relation log10(x^y) = y*log10(x), where 10 is the base.
return y * math.logaa(__UpperCamelCase )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("""This should never happen""" )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
__lowerCAmelCase : Any = '''Enter the base and the power separated by a comma: '''
__lowerCAmelCase , __lowerCAmelCase : int = map(int, input(prompt).split(''','''))
__lowerCAmelCase , __lowerCAmelCase : Any = map(int, input(prompt).split(''','''))
# We find the log of each number, using the function res(), which takes two
# arguments.
__lowerCAmelCase : Tuple = res(xa, ya)
__lowerCAmelCase : List[Any] = res(xa, ya)
# We check for the largest number
if resa > resa:
print('''Largest number is''', xa, '''^''', ya)
elif resa > resa:
print('''Largest number is''', xa, '''^''', ya)
else:
print('''Both are equal''')
| 21 |
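# A worked instance of the comparison: which is larger, 2**100 or 10**30?
# Comparing y * log10(x) sidesteps forming the huge powers themselves.
import math

lhs = 100 * math.log10(2)    # ≈ 30.10
rhs = 30 * math.log10(10)    # = 30.0
print("2**100 is larger" if lhs > rhs else "10**30 is larger")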
"""simple docstring"""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = nn.functional.normalize(__UpperCamelCase )
snake_case_ : Tuple = nn.functional.normalize(__UpperCamelCase )
return torch.mm(__UpperCamelCase , normalized_text_embeds.t() )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = CLIPConfig
_lowerCamelCase = ['''CLIPEncoderLayer''']
def __init__( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Tuple = CLIPVisionModel(config.vision_config )
snake_case_ : int = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_lowercase )
snake_case_ : Optional[Any] = nn.Parameter(torch.ones(1_7 , config.projection_dim ) , requires_grad=_lowercase )
snake_case_ : Dict = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=_lowercase )
snake_case_ : Any = nn.Parameter(torch.ones(1_7 ) , requires_grad=_lowercase )
snake_case_ : List[str] = nn.Parameter(torch.ones(3 ) , requires_grad=_lowercase )
@torch.no_grad()
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
snake_case_ : int = self.vision_model(_lowercase )[1] # pooled_output
snake_case_ : str = self.visual_projection(_lowercase )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
snake_case_ : Dict = cosine_distance(_lowercase , self.special_care_embeds ).cpu().float().numpy()
snake_case_ : List[str] = cosine_distance(_lowercase , self.concept_embeds ).cpu().float().numpy()
snake_case_ : Any = []
snake_case_ : Any = image_embeds.shape[0]
for i in range(_lowercase ):
snake_case_ : List[Any] = {"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []}
            # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
snake_case_ : int = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
snake_case_ : List[str] = special_cos_dist[i][concept_idx]
snake_case_ : Union[str, Any] = self.special_care_embeds_weights[concept_idx].item()
snake_case_ : Tuple = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} )
snake_case_ : Dict = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
snake_case_ : int = cos_dist[i][concept_idx]
snake_case_ : List[Any] = self.concept_embeds_weights[concept_idx].item()
snake_case_ : List[str] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(_lowercase )
result.append(_lowercase )
snake_case_ : Union[str, Any] = [len(res["""bad_concepts"""] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = self.vision_model(_lowercase )[1] # pooled_output
snake_case_ : List[str] = self.visual_projection(_lowercase )
snake_case_ : str = cosine_distance(_lowercase , self.special_care_embeds )
snake_case_ : Optional[int] = cosine_distance(_lowercase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
snake_case_ : Tuple = 0.0
snake_case_ : List[Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
snake_case_ : str = torch.any(special_scores > 0 , dim=1 )
snake_case_ : List[str] = special_care * 0.01
snake_case_ : Optional[int] = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
snake_case_ : Optional[Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
snake_case_ : str = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
| 21 | 1 |
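# The cosine_distance helper above in isolation: L2-normalize both embedding
# matrices, then one matrix product yields every pairwise cosine similarity.
# Shapes follow the safety checker (17 concept embeddings); values are random.
import torch
import torch.nn as nn

image_embeds = torch.randn(2, 768)
concept_embeds = torch.randn(17, 768)
sim = torch.mm(nn.functional.normalize(image_embeds),
               nn.functional.normalize(concept_embeds).t())
print(sim.shape)                      # torch.Size([2, 17])
print(bool((sim.abs() <= 1.0 + 1e-6).all()))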