| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
from typing import Any
def mode(input_list: list) -> list[Any]:
    """Return the mode(s) (the most frequently occurring value(s)) of input_list, sorted.

    >>> mode([2, 2, 3, 4, 4, 4])
    [4]
    >>> mode([1, 1, 2, 2])
    [1, 2]
    >>> mode([])
    []
    """
    if not input_list:
        return []
    # Count of each value, positionally aligned with input_list.
    counts = [input_list.count(value) for value in input_list]
    max_count = max(counts)  # Gets the maximum count in the input list.
    # Gets the values of the modes: every value whose count equals the maximum.
    return sorted({input_list[i] for i, count in enumerate(counts) if count == max_count})
if __name__ == "__main__":
import doctest
doctest.testmod()
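# Illustrative usage (editor's addition, not part of the original file; the
# sample list is arbitrary demo data):
def _demo_mode() -> None:
    sample = [2, 2, 3, 4, 4, 4]
    print(f"mode({sample}) = {mode(sample)}")  # -> mode([...]) = [4]


if __name__ == "__main__":
    _demo_mode()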
"""A Trainer subclass for question answering that post-processes raw predictions before computing metrics."""
import math
import time

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default.
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node logs the results by default.
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
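# Illustrative wiring (editor's addition): how a QA fine-tuning script would
# typically construct this trainer. All names below (model, training_args, the
# datasets, post_processing_function, compute_metrics) are placeholders that a
# real script would define; the sketch only shows which hooks the class expects.
#
#     trainer = QuestionAnsweringTrainer(
#         model=model,
#         args=training_args,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,            # tokenized features
#         eval_examples=eval_examples,          # raw examples, used by post-processing
#         tokenizer=tokenizer,
#         post_process_function=post_processing_function,  # maps logits -> text answers
#         compute_metrics=compute_metrics,      # e.g. SQuAD exact match / F1
#     )
#     metrics = trainer.evaluate()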
from __future__ import annotations

import time

import numpy as np

test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]


class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        """
        :param claim_vector: total amount of each resource in the system
        :param allocated_resources_table: resources currently held by each process
        :param maximum_claim_table: maximum resources each process may claim
        """
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum the currently allocated resources, column by column."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still available: the claim vector minus what is allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Per-process outstanding need: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each need vector back to the index of its process."""
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        """Run the Banker's algorithm, printing each process as it executes."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from the index manager
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the executed process from the stack
                    need_list.remove(each_need)
                    # update the available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Print the allocated and maximum-claim tables along with the totals."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
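# Illustrative run (editor's addition): exercises the class above with the
# module-level example tables and prints the schedule it finds.
def _demo_bankers_algorithm() -> None:
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)


if __name__ == "__main__":
    _demo_bankers_algorithm()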
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        """Multiply the pixel values of `image` by `scale`."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        """Pad `image` on the bottom and right up to the next multiple of `size`, mirroring edge pixels."""
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Prepare one image or a batch of images for the model."""
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
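# Illustrative usage (editor's addition). Outside the transformers source tree the
# relative imports above will not resolve, so in practice the class is used through
# the public API; the array below is an arbitrary sample image:
#
#     import numpy as np
#     from transformers import Swin2SRImageProcessor
#
#     processor = Swin2SRImageProcessor()            # pad_size defaults to 8
#     image = np.zeros((3, 61, 61), dtype=np.uint8)
#     out = processor(image, return_tensors="np")
#     print(out["pixel_values"].shape)               # (1, 3, 64, 64): padded up to multiples of 8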
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """
    Return the peak value of lst (the list strictly increases, then strictly decreases)
    by divide and conquer in O(log n) time.

    >>> peak([1, 2, 3, 4, 5, 4, 3, 2, 1])
    5
    >>> peak([1, 10, 9, 8, 7, 6, 5, 4])
    10
    """
    # middle index
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
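# Illustrative usage (editor's addition; the sample list is arbitrary):
def _demo_peak() -> None:
    print(peak([1, 3, 7, 5, 2]))  # -> 7: the values strictly rise, then fall


if __name__ == "__main__":
    _demo_peak()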
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), input_bpe_tokens)
"""Package init for the DeepFloyd IF pipelines."""
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)


@dataclass
class IFPipelineOutput(BaseOutput):
    """
    Output of the DeepFloyd IF pipelines: the generated images plus, per image,
    whether NSFW content or a watermark was detected.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
    from .pipeline_if_inpainting import IFInpaintingPipeline
    from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
    from .pipeline_if_superresolution import IFSuperResolutionPipeline
    from .safety_checker import IFSafetyChecker
    from .watermark import IFWatermarker
"""Processor class for BLIP, wrapping a BLIP image processor and a BERT tokenizer."""
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
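# Illustrative usage (editor's addition; downloads a public checkpoint, so it
# needs network access; the caption prefix is arbitrary sample text):
#
#     from transformers import BlipProcessor
#     from PIL import Image
#
#     processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#     image = Image.new("RGB", (224, 224))
#     inputs = processor(images=image, text="a photo of", return_tensors="pt")
#     print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']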
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self : Optional[int] , lowercase__ : Dict , lowercase__ : List[str]=2 , lowercase__ : str=8 , lowercase__ : List[str]=True , lowercase__ : Union[str, Any]=True , lowercase__ : Optional[int]=True , lowercase__ : List[str]=True , lowercase__ : int=9_9 , lowercase__ : List[Any]=1_6 , lowercase__ : Tuple=5 , lowercase__ : Optional[int]=2 , lowercase__ : List[Any]=3_6 , lowercase__ : Union[str, Any]="gelu" , lowercase__ : Tuple=0.0 , lowercase__ : List[Any]=0.0 , lowercase__ : List[Any]=5_1_2 , lowercase__ : Optional[int]=1_6 , lowercase__ : int=2 , lowercase__ : Any=0.0_2 , lowercase__ : Any=3 , lowercase__ : Any=4 , lowercase__ : int=None , ):
__lowercase : int = parent
__lowercase : List[Any] = batch_size
__lowercase : Optional[Any] = seq_length
__lowercase : List[Any] = is_training
__lowercase : Optional[int] = use_input_mask
__lowercase : Optional[int] = use_token_type_ids
__lowercase : Union[str, Any] = use_labels
__lowercase : Dict = vocab_size
__lowercase : Dict = hidden_size
__lowercase : Dict = num_hidden_layers
__lowercase : Any = num_attention_heads
__lowercase : List[Any] = intermediate_size
__lowercase : Optional[Any] = hidden_act
__lowercase : Optional[int] = hidden_dropout_prob
__lowercase : Optional[Any] = attention_probs_dropout_prob
__lowercase : Optional[int] = max_position_embeddings
__lowercase : Any = type_vocab_size
__lowercase : Optional[Any] = type_sequence_label_size
__lowercase : Optional[Any] = initializer_range
__lowercase : Any = num_labels
__lowercase : Optional[int] = num_choices
__lowercase : Optional[Any] = scope
def snake_case ( self : List[str] ):
__lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : str = None
if self.use_input_mask:
__lowercase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : int = None
if self.use_token_type_ids:
__lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase : str = None
__lowercase : Tuple = None
__lowercase : List[str] = None
if self.use_labels:
__lowercase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : str = ids_tensor([self.batch_size] , self.num_choices )
__lowercase : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self : Optional[int] ):
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , )
def snake_case ( self : int ):
__lowercase : Dict = self.get_config()
__lowercase : List[str] = 3_0_0
return config
def snake_case ( self : Dict ):
(
__lowercase
) : Optional[Any] = self.prepare_config_and_inputs()
__lowercase : List[Any] = True
__lowercase : List[str] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowercase : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def snake_case ( self : str , lowercase__ : str , lowercase__ : List[Any] , lowercase__ : Optional[Any] , lowercase__ : Optional[Any] , lowercase__ : List[str] , lowercase__ : Any , lowercase__ : List[str] ):
__lowercase : Any = MraModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
__lowercase : Optional[Any] = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ )
__lowercase : List[str] = model(lowercase_ , token_type_ids=lowercase_ )
__lowercase : Dict = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : Optional[Any] , lowercase__ : Any , lowercase__ : List[Any] , lowercase__ : List[Any] , lowercase__ : str , lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Optional[Any] , lowercase__ : str , lowercase__ : Optional[Any] , ):
__lowercase : Any = True
__lowercase : List[Any] = MraModel(lowercase_ )
model.to(lowercase_ )
model.eval()
__lowercase : Tuple = model(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
__lowercase : Any = model(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , encoder_hidden_states=lowercase_ , )
__lowercase : List[str] = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : Tuple , lowercase__ : List[str] , lowercase__ : Union[str, Any] , lowercase__ : Dict , lowercase__ : int , lowercase__ : List[str] , lowercase__ : List[Any] , lowercase__ : List[str] ):
__lowercase : List[Any] = MraForMaskedLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
__lowercase : Optional[int] = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self : str , lowercase__ : Optional[int] , lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : Any , lowercase__ : Tuple , lowercase__ : int , lowercase__ : Tuple ):
__lowercase : List[Any] = MraForQuestionAnswering(config=lowercase_ )
model.to(lowercase_ )
model.eval()
__lowercase : int = model(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case ( self : str , lowercase__ : Optional[Any] , lowercase__ : Optional[Any] , lowercase__ : int , lowercase__ : int , lowercase__ : List[str] , lowercase__ : Optional[int] , lowercase__ : List[str] ):
__lowercase : int = self.num_labels
__lowercase : int = MraForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
__lowercase : List[str] = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self : Optional[Any] , lowercase__ : Dict , lowercase__ : Any , lowercase__ : Optional[Any] , lowercase__ : List[str] , lowercase__ : List[str] , lowercase__ : List[Any] , lowercase__ : str ):
__lowercase : List[Any] = self.num_labels
__lowercase : Union[str, Any] = MraForTokenClassification(config=lowercase_ )
model.to(lowercase_ )
model.eval()
__lowercase : Optional[Any] = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case ( self : Dict , lowercase__ : Optional[int] , lowercase__ : Tuple , lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any] , lowercase__ : Optional[int] , lowercase__ : str , lowercase__ : Optional[Any] ):
__lowercase : Union[str, Any] = self.num_choices
__lowercase : int = MraForMultipleChoice(config=lowercase_ )
model.to(lowercase_ )
model.eval()
__lowercase : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase : str = model(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case ( self : Union[str, Any] ):
__lowercase : Dict = self.prepare_config_and_inputs()
(
__lowercase
) : Optional[Any] = config_and_inputs
__lowercase : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Any = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : Any = False
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : str = False
__UpperCAmelCase : List[Any] = ()
def snake_case ( self : Optional[int] ):
__lowercase : Any = MraModelTester(self )
__lowercase : int = ConfigTester(self , config_class=lowercase_ , hidden_size=3_7 )
def snake_case ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def snake_case ( self : Tuple ):
__lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def snake_case ( self : str ):
__lowercase : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase : Dict = type
self.model_tester.create_and_check_model(*lowercase_ )
def snake_case ( self : Dict ):
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase_ )
def snake_case ( self : Tuple ):
__lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase_ )
def snake_case ( self : Optional[int] ):
__lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_ )
def snake_case ( self : Optional[Any] ):
__lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase_ )
def snake_case ( self : str ):
__lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_ )
@slow
def snake_case ( self : Dict ):
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : Union[str, Any] = MraModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@unittest.skip(reason="MRA does not output attentions" )
def snake_case ( self : Optional[Any] ):
return
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case ( self : Optional[Any] ):
__lowercase : Union[str, Any] = MraModel.from_pretrained("uw-madison/mra-base-512-4" )
__lowercase : Union[str, Any] = torch.arange(2_5_6 ).unsqueeze(0 )
with torch.no_grad():
__lowercase : str = model(lowercase_ )[0]
__lowercase : Optional[int] = torch.Size((1, 2_5_6, 7_6_8) )
self.assertEqual(output.shape , lowercase_ )
__lowercase : int = torch.tensor(
[[[-0.0_1_4_0, 0.0_8_3_0, -0.0_3_8_1], [0.1_5_4_6, 0.1_4_0_2, 0.0_2_2_0], [0.1_1_6_2, 0.0_8_5_1, 0.0_1_6_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=1e-4 ) )
@slow
def snake_case ( self : List[Any] ):
__lowercase : Optional[Any] = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4" )
__lowercase : Union[str, Any] = torch.arange(2_5_6 ).unsqueeze(0 )
with torch.no_grad():
__lowercase : Optional[int] = model(lowercase_ )[0]
__lowercase : Optional[Any] = 5_0_2_6_5
__lowercase : Optional[Any] = torch.Size((1, 2_5_6, vocab_size) )
self.assertEqual(output.shape , lowercase_ )
__lowercase : Optional[int] = torch.tensor(
[[[9.2_5_9_5, -3.6_0_3_8, 1_1.8_8_1_9], [9.3_8_6_9, -3.2_6_9_3, 1_1.0_9_5_6], [1_1.8_5_2_4, -3.4_9_3_8, 1_3.1_2_1_0]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=1e-4 ) )
@slow
def snake_case ( self : Optional[int] ):
__lowercase : Any = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3" )
__lowercase : Optional[int] = torch.arange(4_0_9_6 ).unsqueeze(0 )
with torch.no_grad():
__lowercase : Tuple = model(lowercase_ )[0]
__lowercase : List[str] = 5_0_2_6_5
__lowercase : Optional[Any] = torch.Size((1, 4_0_9_6, vocab_size) )
self.assertEqual(output.shape , lowercase_ )
__lowercase : Tuple = torch.tensor(
[[[5.4_7_8_9, -2.3_5_6_4, 7.5_0_6_4], [7.9_0_6_7, -1.3_3_6_9, 9.9_6_6_8], [9.0_7_1_2, -1.8_1_0_6, 7.0_3_8_0]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=1e-4 ) )
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class _UpperCAmelCase ( unittest.TestCase):
def __init__( self : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : str=13 , lowercase_ : Union[str, Any]=7 , lowercase_ : Dict=True , lowercase_ : int=True , lowercase_ : str=True , lowercase_ : List[Any]=True , lowercase_ : Optional[int]=99 , lowercase_ : str=32 , lowercase_ : List[str]=5 , lowercase_ : List[str]=4 , lowercase_ : List[Any]=37 , lowercase_ : Optional[Any]="gelu" , lowercase_ : Any=0.1 , lowercase_ : Dict=0.1 , lowercase_ : List[Any]=512 , lowercase_ : List[str]=16 , lowercase_ : Any=2 , lowercase_ : Dict=0.02 , lowercase_ : Any=4 , ):
snake_case_ : str = parent
snake_case_ : List[Any] = batch_size
snake_case_ : int = seq_length
snake_case_ : Union[str, Any] = is_training
snake_case_ : List[Any] = use_attention_mask
snake_case_ : Optional[Any] = use_token_type_ids
snake_case_ : List[str] = use_labels
snake_case_ : List[Any] = vocab_size
snake_case_ : Union[str, Any] = hidden_size
snake_case_ : int = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : Union[str, Any] = intermediate_size
snake_case_ : Optional[int] = hidden_act
snake_case_ : int = hidden_dropout_prob
snake_case_ : int = attention_probs_dropout_prob
snake_case_ : int = max_position_embeddings
snake_case_ : List[str] = type_vocab_size
snake_case_ : Any = type_sequence_label_size
snake_case_ : List[str] = initializer_range
snake_case_ : int = num_choices
def _snake_case ( self : Union[str, Any] ):
snake_case_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Dict = None
if self.use_attention_mask:
snake_case_ : int = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : Any = None
if self.use_token_type_ids:
snake_case_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Optional[int] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _snake_case ( self : List[str] ):
snake_case_ : Optional[Any] = self.prepare_config_and_inputs()
snake_case_, snake_case_, snake_case_, snake_case_ : int = config_and_inputs
snake_case_ : List[str] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class _UpperCAmelCase ( lowerCAmelCase__ , unittest.TestCase):
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Any = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _snake_case ( self : Optional[int] ):
snake_case_ : Any = FlaxRoFormerModelTester(self )
@slow
def _snake_case ( self : Optional[Any] ):
for model_class_name in self.all_model_classes:
snake_case_ : str = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''' , from_pt=lowercase_ )
snake_case_ : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowercase_ )
@require_flax
class _UpperCAmelCase ( unittest.TestCase):
@slow
def _snake_case ( self : Optional[int] ):
snake_case_ : Optional[int] = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
snake_case_ : Dict = jnp.array([[0, 1, 2, 3, 4, 5]] )
snake_case_ : List[Any] = model(lowercase_ )[0]
snake_case_ : List[str] = 50000
snake_case_ : Any = (1, 6, vocab_size)
self.assertEqual(output.shape , lowercase_ )
snake_case_ : Tuple = jnp.array(
[[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , lowercase_ , atol=1E-4 ) )
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J/(mol*K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """
    Root-mean-square speed of a gas molecule, v_rms = sqrt(3RT/M), where R is the
    universal gas constant, T the temperature in kelvin, and M the molar mass in kg/mol.
    """
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example: nitrogen gas (N2) at 300 K; note the molar mass must be in kg/mol
    temperature = 300
    molar_mass = 0.028  # kg/mol for N2 (28 g/mol)
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")  # ~517 m/s
"""Image processor class for GLPN."""
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    PILImageResampling,
    get_image_size,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image`, rounding height and width down to multiples of `size_divisor`."""
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        """Multiply the pixel values of `image` by `scale`."""
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Prepare one image or a batch of images for the model."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
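# Illustrative usage (editor's addition; the array is an arbitrary sample image):
#
#     import numpy as np
#     from transformers import GLPNImageProcessor
#
#     processor = GLPNImageProcessor()                # size_divisor defaults to 32
#     image = np.zeros((3, 100, 75), dtype=np.uint8)
#     out = processor(image, return_tensors="np")
#     print(out["pixel_values"].shape)                # (1, 3, 96, 64): rounded down to multiples of 32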
import os
import zipfile

import pytest

from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
        extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
        extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path


@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path


@pytest.mark.parametrize(
    "insecure_tar_file, error_log",
    [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")],
)
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    input_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(input_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg


def test_is_zipfile_false_positive(tmpdir):
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
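# Illustrative sketch of the API under test (editor's addition). As exercised in
# the tests above, `Extractor` infers a format and extracts in one call; the
# archive path below is a hypothetical placeholder.
#
#     from datasets.utils.extract import Extractor
#
#     fmt = Extractor.infer_extractor_format("archive.tar")
#     if fmt is not None:
#         Extractor.extract("archive.tar", "extracted/", fmt)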
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class lowercase__ :
a_ =PegasusConfig
a_ ={}
a_ ="""gelu"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=40 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = eos_token_id
lowerCAmelCase__ = pad_token_id
lowerCAmelCase__ = bos_token_id
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCAmelCase__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCAmelCase__ = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowerCAmelCase__ = prepare_pegasus_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return config, inputs_dict
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = TFPegasusModel(config=__UpperCAmelCase ).get_decoder()
lowerCAmelCase__ = inputs_dict["input_ids"]
lowerCAmelCase__ = input_ids[:1, :]
lowerCAmelCase__ = inputs_dict["attention_mask"][:1, :]
lowerCAmelCase__ = inputs_dict["head_mask"]
lowerCAmelCase__ = 1
# first forward pass
lowerCAmelCase__ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , head_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
lowerCAmelCase__ = tf.concat([input_ids, next_tokens] , axis=-1 )
lowerCAmelCase__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowerCAmelCase__ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0]
lowerCAmelCase__ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowerCAmelCase__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowerCAmelCase__ = output_from_no_past[:, -3:, random_slice_idx]
lowerCAmelCase__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3 )
def _a ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any=None , UpperCamelCase_ : int=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : int=None , ) -> int:
"""simple docstring"""
if attention_mask is None:
lowerCAmelCase__ = tf.cast(tf.math.not_equal(UpperCamelCase_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
lowerCAmelCase__ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
lowerCAmelCase__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCAmelCase__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowerCAmelCase__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowercase__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase ):
a_ =(TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
a_ =(TFPegasusForConditionalGeneration,) if is_tf_available() else ()
a_ =(
{
"""conversational""": TFPegasusForConditionalGeneration,
"""feature-extraction""": TFPegasusModel,
"""summarization""": TFPegasusForConditionalGeneration,
"""text2text-generation""": TFPegasusForConditionalGeneration,
"""translation""": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
a_ =True
a_ =False
a_ =False
    def setUp( self )-> Any:
        '''simple docstring'''
        self.model_tester = TFPegasusModelTester(self )
        self.config_tester = ConfigTester(self , config_class=PegasusConfig )
    def test_config( self )-> Optional[int]:
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self )-> Optional[int]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase ):
    src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
    expected_text = [
"""California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
""" reduce the risk of wildfires.""",
"""N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
a_ ="""google/pegasus-xsum"""
@cached_property
    def tokenizer( self )-> Any:
        '''simple docstring'''
        return AutoTokenizer.from_pretrained(self.model_name )
    @cached_property
    def model( self )-> Dict:
        '''simple docstring'''
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
    def _assert_generated_batch_equal_expected( self , **__UpperCAmelCase )-> int:
        '''simple docstring'''
        generated_words = self.translate_src_text(**__UpperCAmelCase )
        assert self.expected_text == generated_words
    def translate_src_text( self , **__UpperCAmelCase )-> Dict:
        '''simple docstring'''
        model_inputs = self.tokenizer(self.src_text , **__UpperCAmelCase , padding=True , return_tensors="tf" )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )
        return generated_words
@slow
    def test_batch_generation( self )-> Dict:
'''simple docstring'''
self._assert_generated_batch_equal_expected()
| 115 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ) -> torch.Tensor:
    """simple docstring"""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )

    else:
        raise ValueError(F"Unsupported alpha_transform_type: {alpha_transform_type}" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
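# Illustrative sanity check (not part of the original module; shown only as an
# example of how the schedule behaves): the cosine transform should yield one
# beta per diffusion timestep, each clipped at `max_beta`, e.g.
#     betas = betas_for_alpha_bar(10)
#     assert betas.shape == (10,) and float(betas.max()) <= 0.999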
class KDPM2DiscreteScheduler(SchedulerMixin , ConfigMixin ):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__( self , num_train_timesteps = 1000 , beta_start = 0.00085 , beta_end = 0.012 , beta_schedule = "linear" , trained_betas = None , prediction_type = "epsilon" , timestep_spacing = "linspace" , steps_offset = 0 , )-> None:
        '''simple docstring'''
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas , dtype=torch.float32 )
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start , beta_end , num_train_timesteps , dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps , dtype=torch.float32 ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps )
        else:
            raise NotImplementedError(F"{beta_schedule} is not implemented for {self.__class__}" )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        # set all values
        self.set_timesteps(num_train_timesteps , None , num_train_timesteps )
    def index_for_timestep( self , timestep , schedule_timesteps=None )-> int:
        '''simple docstring'''
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            pos = 1 if len(indices ) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
    @property
    def init_noise_sigma( self ):
        '''simple docstring'''
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input( self , sample , timestep , )-> torch.FloatTensor:
        '''simple docstring'''
        step_index = self.index_for_timestep(timestep )
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps( self , num_inference_steps , device = None , num_train_timesteps = None , )-> None:
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0 , num_train_timesteps - 1 , num_inference_steps , dtype=float )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(float )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps , 0 , -step_ratio )).round().copy().astype(float )
            timesteps -= 1
        else:
            raise ValueError(
                F"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." )
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        self.log_sigmas = torch.from_numpy(np.log(sigmas ) ).to(device )
        sigmas = np.interp(timesteps , np.arange(0 , len(sigmas ) ) , sigmas )
        sigmas = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
        sigmas = torch.from_numpy(sigmas ).to(device=device )
        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
        if str(device ).startswith("mps" ):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps ).to(device , dtype=torch.float32 )
        else:
            timesteps = torch.from_numpy(timesteps ).to(device )
        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol ).to(device , dtype=timesteps.dtype )
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps] )
        self.sample = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int )
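    # Note (added for clarity, assuming the standard KDPM2 formulation): after
    # `set_timesteps`, `self.sigmas` and `self.timesteps` interleave the original
    # grid with log-linear midpoints, so each sampler iteration alternates between
    # a first-order step at sigma_i and a second-order correction at the midpoint.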
    def sigma_to_t( self , sigma )-> torch.Tensor:
        '''simple docstring'''
        log_sigma = sigma.log()
        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]
        # get sigmas range
        low_idx = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
        high_idx = low_idx + 1
        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0 , 1 )
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape )
        return t
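    # Note (added for clarity): `sigma_to_t` inverts the noise schedule by locating
    # each log-sigma between its two nearest entries of `self.log_sigmas` and
    # linearly interpolating, which is why the returned timesteps can be fractional.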
@property
    def state_in_first_order( self ):
'''simple docstring'''
return self.sample is None
    def step( self , model_output , timestep , sample , return_dict = True , )-> Union[SchedulerOutput, Tuple]:
        '''simple docstring'''
        step_index = self.index_for_timestep(timestep )
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample" )
        else:
            raise ValueError(
                F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat
            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            sample = self.sample
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
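    # Note (added for clarity, assuming the usual DPM-Solver-2 reading): `step`
    # alternates between two phases tracked via `self.sample` -- the first-order
    # phase integrates from sigma_hat to the interpolated midpoint and stashes the
    # input sample, while the second-order phase re-evaluates the derivative at the
    # midpoint and integrates the stashed sample over the full interval.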
    def add_noise( self , original_samples , noise , timesteps , )-> torch.FloatTensor:
        '''simple docstring'''
        sigmas = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps ):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device , dtype=torch.float32 )
            timesteps = timesteps.to(original_samples.device , dtype=torch.float32 )
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device )
            timesteps = timesteps.to(original_samples.device )
        step_indices = [self.index_for_timestep(t , schedule_timesteps ) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape ) < len(original_samples.shape ):
            sigma = sigma.unsqueeze(-1 )
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
    def __len__( self )-> int:
'''simple docstring'''
return self.config.num_train_timesteps
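# Minimal usage sketch (illustrative only; the latent shape and the denoising
# model call are placeholders, not part of the original file):
#     scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(25, device="cpu")
#     latents = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(latents, t)
#         noise_pred = ...  # call a denoising model here (hypothetical)
#         latents = scheduler.step(noise_pred, t, latents).prev_sample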
| 115 | 1 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return LlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = LlamaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.add_cross_attention = True
        model = LlamaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        model = LlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )["hidden_states"][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice , output_from_past_slice , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            """feature-extraction""": LlamaModel,
            """text-classification""": LlamaForSequenceClassification,
            """text-generation""": LlamaForCausalLM,
            """zero-shot""": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp( self ):
        self.model_tester = LlamaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LlamaConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_llama_sequence_classification_model( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_llama_sequence_classification_model_for_single_label( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_llama_sequence_classification_model_for_multi_label( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("LLaMA buffers include complex numbers, which breaks this test" )
    def test_save_load_fast_init_from_base( self ):
pass
@parameterized.expand([("linear",), ("dynamic",)] )
    def test_model_rope_scaling( self , scaling_type ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] , config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1E-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1E-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1E-5 ) )
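    # Note (added for clarity; behaviour as commonly documented for RoPE scaling):
    # "linear" scaling divides the position index by `factor` for every input, so
    # even short inputs change, while "dynamic" NTK scaling only adjusts the rotary
    # base once the sequence exceeds the pretrained context window -- which is
    # exactly what the short-input assertions above distinguish.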
@require_torch
class LlamaIntegrationTest(unittest.TestCase ):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!" )
    @slow
    def test_model_7b_logits( self ):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto" )
        out = model(torch.tensor([input_ids] ) )
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
        torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1E-2 , rtol=1E-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , EXPECTED_SLICE , atol=1E-5 , rtol=1E-5 )
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!" )
    @slow
    def test_model_13b_logits( self ):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto" )
        out = model(torch.tensor(input_ids ) )
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
        torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1E-2 , rtol=1E-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , EXPECTED_SLICE , atol=1E-5 , rtol=1E-5 )
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!" )
    @slow
    def test_model_13bf_logits( self ):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto" )
        out = model(torch.tensor(input_ids ) )
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
        torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1E-2 , rtol=1E-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , EXPECTED_SLICE , atol=1E-5 , rtol=1E-5 )
    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test" )
    @slow
    def test_model_70b_logits( self ):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto" )
        out = model(torch.tensor(input_ids ) )
        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.float32 )
        torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1E-2 , rtol=1E-2 )
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , EXPECTED_SLICE , atol=1E-5 , rtol=1E-5 )
    @unittest.skip("Model is currently gated" )
    @slow
    def test_model_13b_greedy_generation( self ):
        EXPECTED_TEXT_COMPLETION = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" )
        input_ids = tokenizer.encode(prompt , return_tensors="pt" )
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=False )
        # greedy generation outputs
        generated_ids = model.generate(input_ids , max_new_tokens=64 , top_p=None , temperature=1 , do_sample=False )
        text = tokenizer.decode(generated_ids[0] , skip_special_tokens=True )
        self.assertEqual(EXPECTED_TEXT_COMPLETION , text )
| 528 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin ):
    def __init__( self , controlnets ):
        super().__init__()
        self.nets = nn.ModuleList(controlnets )
    def forward( self , sample , timestep , encoder_hidden_states , controlnet_cond , conditioning_scale , class_labels = None , timestep_cond = None , attention_mask = None , cross_attention_kwargs = None , guess_mode = False , return_dict = True , ):
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond , conditioning_scale , self.nets ) ):
            down_samples, mid_sample = controlnet(
                sample , timestep , encoder_hidden_states , image , scale , class_labels , timestep_cond , attention_mask , cross_attention_kwargs , guess_mode , return_dict , )
            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples , down_samples )
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample
    def save_pretrained( self , save_directory , is_main_process = True , save_function = None , safe_serialization = False , variant = None , ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save , is_main_process=is_main_process , save_function=save_function , safe_serialization=safe_serialization , variant=variant , )
            idx += 1
            model_path_to_save = model_path_to_save + F"_{idx}"
    @classmethod
    def from_pretrained( cls , pretrained_model_path , **kwargs ):
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load ):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load , **kwargs )
            controlnets.append(controlnet )
            idx += 1
            model_path_to_load = pretrained_model_path + F"_{idx}"
        logger.info(F"{len(controlnets )} controlnets loaded from {pretrained_model_path}." )
        if len(controlnets ) == 0:
            raise ValueError(
                F"No ControlNets found under {os.path.dirname(pretrained_model_path )}. Expected at least {pretrained_model_path + '_0'}." )
        return cls(controlnets )
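# Minimal usage sketch (illustrative only; the directory and conditioning tensors
# are placeholders, not part of the original module):
#     nets = MultiControlNetModel.from_pretrained("./my_controlnets")
#     down_res, mid_res = nets(sample, timestep, encoder_hidden_states,
#                              controlnet_cond=[cond_a, cond_b],
#                              conditioning_scale=[1.0, 0.5])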
| 528 | 1 |
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    def __init__( self , parent , vocab_size=100 , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , )-> int:
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self )-> Tuple:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = BeitConfig(
            vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
        return config, pixel_values, labels
    def create_and_check_model( self , config , pixel_values , labels )-> List[Any]:
        model = FlaxBeitModel(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , pixel_values , labels )-> Any:
        model = FlaxBeitForMaskedImageModeling(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels )-> List[str]:
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
    def prepare_config_and_inputs_for_common( self )-> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin , unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )
    def setUp( self )-> None:
        self.model_tester = FlaxBeitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BeitConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self )-> List[str]:
        self.config_tester.run_common_tests()
    def test_forward_signature( self )-> int:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_jit_compilation( self )-> str:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def model_jitted(pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )
                with self.subTest("""JIT Enabled""" ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def test_model( self )-> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self )-> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_image_classification( self )-> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self )-> int:
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""microsoft/beit-base-patch16-224""" )
            outputs = model(np.ones((1, 3, 224, 224) ) )
            self.assertIsNotNone(outputs )
def prepare_img():
    """simple docstring"""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor( self )-> Union[str, Any]:
        return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
    @slow
    def test_inference_masked_image_modeling_head( self )-> List[str]:
        model = FlaxBeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors="""np""" ).pixel_values
        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196) , dtype=bool )
        # forward pass
        outputs = model(pixel_values=pixel_values , bool_masked_pos=bool_masked_pos )
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , expected_slice , atol=1E-2 ) )
    @slow
    def test_inference_image_classification_head_imagenet_1k( self )-> Union[str, Any]:
        model = FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""np""" )
        # forward pass
        outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = np.array([-1.2385, -1.0987, -1.0108] )
        self.assertTrue(np.allclose(logits[0, :3] , expected_slice , atol=1E-4 ) )
        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1 ).item() , expected_class_idx )
    @slow
    def test_inference_image_classification_head_imagenet_22k( self )-> List[str]:
        model = FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""np""" )
        # forward pass
        outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = np.array([1.6881, -0.2787, 0.5901] )
        self.assertTrue(np.allclose(logits[0, :3] , expected_slice , atol=1E-4 ) )
        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1 ).item() , expected_class_idx )
| 706 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions( self )-> List[str]:
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation("""gelu""" )
        self.assertTrue(torch.allclose(gelu_python(x ) , torch_builtin(x ) ) )
        self.assertFalse(torch.allclose(gelu_python(x ) , gelu_new(x ) ) )
    def test_gelu_10( self )-> int:
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation("""gelu""" )
        gelu_10 = get_activation("""gelu_10""" )
        y_gelu = torch_builtin(x )
        y_gelu_10 = gelu_10(x )
        clipped_mask = torch.where(y_gelu_10 < 10.0 , 1 , 0 )
        self.assertTrue(torch.max(y_gelu_10 ).item() == 10.0 )
        self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_10 * clipped_mask ) )
    def test_get_activation( self )-> Dict:
get_activation("""gelu""" )
get_activation("""gelu_10""" )
get_activation("""gelu_fast""" )
get_activation("""gelu_new""" )
get_activation("""gelu_python""" )
get_activation("""gelu_pytorch_tanh""" )
get_activation("""linear""" )
get_activation("""mish""" )
get_activation("""quick_gelu""" )
get_activation("""relu""" )
get_activation("""sigmoid""" )
get_activation("""silu""" )
get_activation("""swish""" )
get_activation("""tanh""" )
        with self.assertRaises(KeyError ):
            get_activation("""bogus""" )
        with self.assertRaises(KeyError ):
            get_activation(None )
    def test_activations_are_distinct_objects( self )-> Any:
        act1 = get_activation("""gelu""" )
        act1.a = 1
        act2 = get_activation("""gelu""" )
        self.assertEqual(act1.a , 1 )
        with self.assertRaises(AttributeError ):
            _ = act2.a
| 75 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ) -> int:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ) -> Union[str, Any]:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ) -> List[Any]:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin , unittest.TestCase ):
    """simple docstring"""
    test_head_masking = True
    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ) -> List[str]:
        '''simple docstring'''
        self.model_tester = FlaxRoFormerModelTester(self )
@slow
    def test_model_from_pretrained( self ) -> List[str]:
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small" , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase ):
    """simple docstring"""
    @slow
    def test_inference_masked_lm( self ) -> Any:
        '''simple docstring'''
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        vocab_size = 50_000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
        self.assertTrue(jnp.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
| 653 |
'''simple docstring'''
def fizz_buzz( number , iterations )-> str:
    """simple docstring"""
    if not isinstance(iterations , int ):
        raise ValueError("iterations must be defined as integers" )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError(
            "starting number must be an integer and be more than 0" )
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
    out = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(__magic_name__ )
# print(out)
number += 1
out += " "
return out
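# Example (illustrative, not part of the original):
#     >>> fizz_buzz(1, 15)
#     '1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz '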
if __name__ == "__main__":
import doctest
doctest.testmod()
| 653 | 1 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest ):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
    @slow
    def test_sequence_builders( self ):
        tokenizer = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
        text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
        text_2 = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 403 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k ):
    """simple docstring"""
    return getitem, k
def _set(k , v ):
    """simple docstring"""
    return setitem, k, v
def _del(k ):
    """simple docstring"""
    return delitem, k
def _run_operation(obj , fun , *args ):
    """simple docstring"""
    try:
        return fun(obj , *args ), None
    except Exception as e:
        return None, e
_add_items = (
    _set('key_a', 'val_a'),
    _set('key_b', 'val_b'),
)
_overwrite_items = [
    _set('key_a', 'val_a'),
    _set('key_a', 'val_b'),
]
_delete_items = [
    _set('key_a', 'val_a'),
    _set('key_b', 'val_b'),
    _del('key_a'),
    _del('key_b'),
    _set('key_a', 'val_a'),
    _del('key_a'),
]
_access_absent_items = [
    _get('key_a'),
    _del('key_a'),
    _set('key_a', 'val_a'),
    _del('key_a'),
    _del('key_a'),
    _get('key_a'),
]
_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]
_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def test_hash_map_is_the_same_as_dict(operations ):
    """simple docstring"""
    my = HashMap(initial_block_size=4 )
    py = {}
    for _, (fun, *args) in enumerate(operations ):
        my_res, my_exc = _run_operation(my , fun , *args )
        py_res, py_exc = _run_operation(py , fun , *args )
        assert my_res == py_res
        assert str(my_exc ) == str(py_exc )
        assert set(my ) == set(py )
        assert len(my ) == len(py )
        assert set(my.items() ) == set(py.items() )
def test_no_new_methods_was_added_to_api():
    """simple docstring"""
    def is_public(name: str ) -> bool:
        return not name.startswith("""_""" )
    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
    assert dict_public_names > hash_public_names
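# Note (added for clarity): the suite above is a differential test -- every
# operation sequence is replayed against both `HashMap` and a plain `dict`, and
# results, exceptions, keys, lengths and items must stay identical, so `dict`
# acts as the reference implementation.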
| 403 | 1 |
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("""1.11""")
def onnx_export( model , model_args , output_path , ordered_input_names , output_names , dynamic_axes , opset , use_external_data_format=False , ) -> None:
    output_path.parent.mkdir(parents=True , exist_ok=True )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , use_external_data_format=use_external_data_format , enable_onnx_checker=True , opset_version=opset , )
    else:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , opset_version=opset , )
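# Note (added for clarity): `use_external_data_format` matters because ONNX
# protobufs are capped at 2 GB, so large fp32 models (the UNet below) must store
# their weights in side-car tensor files that are later collated.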
@torch.no_grad()
def convert_models(model_path: str , output_path: str , opset: int , fp16: bool = False ) -> None:
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path , torch_dtype=dtype ).to(device )
    output_path = Path(output_path )
    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        """A sample prompt""" , padding="""max_length""" , max_length=pipeline.tokenizer.model_max_length , truncation=True , return_tensors="""pt""" , )
    onnx_export(
        pipeline.text_encoder , model_args=(text_input.input_ids.to(device=device , dtype=torch.int32 )) , output_path=output_path / """text_encoder""" / """model.onnx""" , ordered_input_names=["""input_ids"""] , output_names=["""last_hidden_state""", """pooler_output"""] , dynamic_axes={
        """input_ids""": {0: """batch""", 1: """sequence"""},
    } , opset=opset , )
    del pipeline.text_encoder
    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / """unet""" / """model.onnx"""
    onnx_export(
        pipeline.unet , model_args=(
            torch.randn(2 , unet_in_channels , unet_sample_size , unet_sample_size ).to(device=device , dtype=dtype ),
            torch.randn(2 ).to(device=device , dtype=dtype ),
            torch.randn(2 , num_tokens , text_hidden_size ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=unet_path , ordered_input_names=["""sample""", """timestep""", """encoder_hidden_states""", """return_dict"""] , output_names=["""out_sample"""] , dynamic_axes={
        """sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
        """timestep""": {0: """batch"""},
        """encoder_hidden_states""": {0: """batch""", 1: """sequence"""},
    } , opset=opset , use_external_data_format=True , )
    unet_model_path = str(unet_path.absolute().as_posix() )
    unet_dir = os.path.dirname(unet_model_path )
    unet = onnx.load(unet_model_path )
    # clean up existing tensor files
    shutil.rmtree(unet_dir )
    os.mkdir(unet_dir )
    # collate external tensor files into one
    onnx.save_model(
        unet , unet_model_path , save_as_external_data=True , all_tensors_to_one_file=True , location="""weights.pb""" , convert_attribute=False , )
del pipeline.unet
# VAE ENCODER
__lowerCAmelCase : List[str] = pipeline.vae
__lowerCAmelCase : Tuple = vae_encoder.config.in_channels
__lowerCAmelCase : Dict = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
__lowerCAmelCase : List[Any] = lambda __A , __A : vae_encoder.encode(__A , __A )[0].sample()
onnx_export(
__A , model_args=(
torch.randn(1 , __A , __A , __A ).to(device=__A , dtype=__A ),
False,
) , output_path=output_path / """vae_encoder""" / """model.onnx""" , ordered_input_names=["""sample""", """return_dict"""] , output_names=["""latent_sample"""] , dynamic_axes={
"""sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=__A , )
# VAE DECODER
__lowerCAmelCase : Any = pipeline.vae
__lowerCAmelCase : Union[str, Any] = vae_decoder.config.latent_channels
__lowerCAmelCase : str = vae_decoder.config.out_channels
# forward only through the decoder part
__lowerCAmelCase : Optional[int] = vae_encoder.decode
onnx_export(
__A , model_args=(
torch.randn(1 , __A , __A , __A ).to(device=__A , dtype=__A ),
False,
) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={
"""latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=__A , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
__lowerCAmelCase : Optional[Any] = pipeline.safety_checker
__lowerCAmelCase : Optional[int] = safety_checker.config.vision_config.num_channels
__lowerCAmelCase : str = safety_checker.config.vision_config.image_size
__lowerCAmelCase : Dict = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , __A , __A , __A , ).to(device=__A , dtype=__A ),
torch.randn(1 , __A , __A , __A ).to(device=__A , dtype=__A ),
) , output_path=output_path / """safety_checker""" / """model.onnx""" , ordered_input_names=["""clip_input""", """images"""] , output_names=["""out_images""", """has_nsfw_concepts"""] , dynamic_axes={
"""clip_input""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
"""images""": {0: """batch""", 1: """height""", 2: """width""", 3: """channels"""},
} , opset=__A , )
del pipeline.safety_checker
__lowerCAmelCase : List[Any] = OnnxRuntimeModel.from_pretrained(output_path / """safety_checker""" )
__lowerCAmelCase : Tuple = pipeline.feature_extractor
else:
__lowerCAmelCase : Dict = None
__lowerCAmelCase : Any = None
__lowerCAmelCase : List[Any] = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_encoder""" ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_decoder""" ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / """text_encoder""" ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / """unet""" ) , scheduler=pipeline.scheduler , safety_checker=__A , feature_extractor=__A , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(__A )
print("""ONNX pipeline saved to""" , __A )
del pipeline
del onnx_pipeline
__lowerCAmelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(__A , provider="""CPUExecutionProvider""" )
print("""ONNX pipeline is loadable""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_path""",
type=str,
required=True,
help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""",
)
parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--opset""",
default=14,
type=int,
help="""The version of the ONNX operator set to use.""",
)
parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""")
    args = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
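# Example invocation, assuming this script is saved as
# convert_stable_diffusion_checkpoint_to_onnx.py (model id and paths are illustrative):
#
#     python convert_stable_diffusion_checkpoint_to_onnx.py \
#         --model_path runwayml/stable-diffusion-v1-5 \
#         --output_path ./sd_onnx \
#         --opset 14
#
# Pass --fp16 on a CUDA machine to export half-precision weights instead.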
| 651 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )
    def to_dict(self):
        """Serializes this instance, replacing any `GenerationConfig` entry by its dict representation."""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
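# Minimal usage sketch (argument values are illustrative, not from the source):
#
#     training_args = Seq2SeqTrainingArguments(
#         output_dir="./out",
#         predict_with_generate=True,
#         generation_max_length=128,
#         generation_num_beams=4,
#     )
#
# When `predict_with_generate=True`, Seq2SeqTrainer calls `model.generate()` during
# evaluation so that text metrics such as ROUGE/BLEU can be computed.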
| 651 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    """Construct a Reformer tokenizer, based on SentencePiece."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)
    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
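# Usage sketch - the checkpoint name comes from the map above; the input text is illustrative:
#
#     tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#     ids = tokenizer("Crime and Punishment")["input_ids"]
#     text = tokenizer.decode(ids)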
| 705 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)
        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class GPT2TokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))
    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))
    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123
            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out["input_ids"].numpy().shape[1]
                assert out_length == max_length
| 474 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
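# How the lazy module works: at import time only the `_import_structure` mapping is
# registered, and each heavy submodule is imported the first time one of its attributes
# is accessed. A hedged sketch of the observable behavior:
#
#     from transformers.models import cpmant  # cheap, no modeling code imported yet
#     cpmant.CpmAntModel                      # first attribute access triggers the real import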
| 652 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def num_embed(self):
        return 12
    @property
    def num_embeds_ada_norm(self):
        return 12
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)
    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = Transformer2DModel(**model_kwargs)
        return model
    def test_vq_diffusion(self):
        device = "cpu"
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)
        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "teddy bear playing in the pool"
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )
        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "teddy bear playing in the pool"
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )
        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
| 388 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 704 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI on the `main` branch."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    """Get the id of the last completed run of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run into `output_dir`."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the content of the report files inside the artifacts of the last completed daily CI run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
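# Usage sketch (token and artifact names are illustrative, not from the source):
#
#     reports = get_last_daily_ci_reports(
#         artifact_names=["run_all_tests_gpu_test_reports"],
#         output_dir="ci_artifacts",
#         token=os.environ.get("GITHUB_TOKEN"),
#     )
#
# `reports` maps each artifact name to a {filename: file content} dict.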
| 620 | 0 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
)
else:
        cluster = rh.cluster(
name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
    example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'pip install -r transformers/examples/{example_dir}/requirements.txt'])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 10 |
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
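# In the transformers repository this function is wired up as the entry point of the
# `transformers-cli` console script, so an invocation like the following (subcommand
# illustrative) dispatches to `main()` via the `func` attribute each subcommand registers:
#
#     transformers-cli env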
| 460 | 0 |
def is_palindrome(n) -> bool:
    return str(n) == str(n)[::-1]
def sum_reverse(n) -> int:
    return int(n) + int(str(n)[::-1])
def solution(limit: int = 10000) -> int:
    # Project Euler 55: count candidate Lychrel numbers below `limit` - numbers that do
    # not produce a palindrome within 50 reverse-and-add iterations.
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
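# Worked example (illustrative): 349 -> 349 + 943 = 1292 -> 1292 + 2921 = 4213 ->
# 4213 + 3124 = 7337, a palindrome, so 349 is NOT a Lychrel candidate. 196 is the
# smallest number believed never to reach a palindrome, so it stays in `lychrel_nums`.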
if __name__ == "__main__":
print(f'{solution() = }')
| 713 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )
@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    # feature objects are callable and return their underlying pyarrow storage type,
    # so `Image()()` compares the arrow schema type, not the feature itself
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 591 | 0 |
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}
    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]
def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests
    monkeypatch.setattr(requests, "request", mock_request)
    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 656 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 656 | 1 |
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split `x` into sentences and rejoin them separated by newlines (as rougeLsum expects)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 94 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"
SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility
MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
| 94 | 1 |
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_SCREAMING_SNAKE_CASE = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")
    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
            preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
            preds = outputs.logits.argmax(dim=-1)
            references = batch["labels"]
            preds, references = accelerator.gather_for_metrics((preds, references))
            metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
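
# Note: this script exercises distributed gathering, so it is meant to be
# started through the Accelerate launcher rather than plain `python`. The file
# name and flag values below are illustrative assumptions, not from this file:
#
#     accelerate launch --num_processes 2 test_metrics.py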
| 18 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)

    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
    @slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
            "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
            "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
            "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
            "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
            "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
            "conditioning on both left and right context in all layers.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        # fmt: off
        expected_encoding = {
            'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
| 553 | 0 |
"""simple docstring"""
import string
from math import logaa
def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : str ) ->int:
'''simple docstring'''
a : Tuple = document.translate(
str.maketrans("" , "" , string.punctuation ) ).replace("\n" , "" )
a : Union[str, Any] = document_without_punctuation.split(" " ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : str ) ->tuple[int, int]:
'''simple docstring'''
a : Any = corpus.lower().translate(
str.maketrans("" , "" , string.punctuation ) ) # strip all punctuation and replace it with ''
a : int = corpus_without_punctuation.split("\n" )
a : Dict = term.lower()
return (len([doc for doc in docs if term in doc] ), len(_lowercase ))
def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : int , _lowercase : Union[str, Any]=False ) ->float:
'''simple docstring'''
if smoothing:
if n == 0:
raise ValueError("log10(0) is undefined." )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError("df must be > 0" )
elif n == 0:
raise ValueError("log10(0) is undefined." )
return round(logaa(n / df ) , 3 )
def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : int ) ->float:
'''simple docstring'''
return round(tf * idf , 3 )
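
if __name__ == "__main__":
    # Worked example (the corpus below is illustrative, not from the original file):
    # three documents, one per line.
    corpus = "this is the first document\nthis document is the second document\nand this is the third one"
    document = corpus.split("\n")[1]
    tf = term_frequency("document", document)  # -> 2 occurrences
    df, n = document_frequency("document", corpus)  # -> appears in 2 of 3 documents
    idf = inverse_document_frequency(df, n)  # -> round(log10(3 / 2), 3) == 0.176
    print(f"tf={tf}, idf={idf}, tf-idf={tf_idf(tf, idf)}")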
| 31 |
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
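
# Example invocation (illustrative -- the argument names come from
# `PretokenizationArguments` as used above; the values are assumptions):
#
#     python pretokenizing.py \
#         --tokenizer_dir codeparrot/codeparrot \
#         --dataset_name codeparrot/codeparrot-clean-train \
#         --tokenized_data_repo tokenized-codeparrot-train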
| 31 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileNetV2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
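
# Minimal usage sketch (illustrative only -- it assumes the relative imports
# above resolve, and the input image is random data):
#
#     import numpy as np
#     processor = MobileNetV2ImageProcessor()
#     image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#     batch = processor(image, return_tensors="pt")
#     batch["pixel_values"].shape  # -> torch.Size([1, 3, 224, 224])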
| 285 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 285 | 1 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 54 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark):w
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).

                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error in raised when the user try to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 54 | 1 |
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)


class AcceleratorTester(AccelerateTestCase):
    @require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)

    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()

    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)

    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()

        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)

    def test_env_var_device(self):
        """Tests that setting the torch device with ACCELERATE_TORCH_DEVICE overrides the default."""
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")

    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}

            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)

            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)

    def test_accelerator_none(self):
        """Just test that passing None to accelerator.prepare() works."""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)

    def test_is_accelerator_prepared(self):
        """Checks that `_is_accelerate_prepared` is set properly on all prepared objects."""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False),
            False,
            "Dummy object should have `_is_accelerate_prepared` set to `True`",
        )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False),
            True,
            "Model is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False),
            True,
            "Optimizer is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False),
            True,
            "Scheduler is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False),
            True,
            "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False),
            True,
            "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )

    @slow
    @require_bnb
    def test_accelerator_bnb(self):
        """Tests that the accelerator can be used with the BNB library."""
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map={"": 0},
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)

    @slow
    @require_bnb
    def test_accelerator_bnb_cpu_error(self):
        """Tests that the accelerator errors out when an 8-bit model is dispatched between CPU and GPU."""
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True
        )

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        """Tests that the accelerator errors out for an 8-bit model dispatched over several GPUs under MULTI_GPU."""
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)

        PartialState._reset_state()

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu_gpu_map(self):
        """Tests that the accelerator can be used with an 8-bit model spread over several GPUs."""
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )

        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)

    @require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(optimizer)
| 280 |
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Return all k-element combinations of the integers 1..n, in lexicographic order."""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
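
    # Cross-check against the standard library (illustrative addition, not part of
    # the original script): the recursive generator matches itertools.combinations.
    from itertools import combinations

    assert total_list == [list(c) for c in combinations(range(1, n + 1), k)]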
| 381 | 0 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention_forwardGenerator_pass = False

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]

        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)


@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 712 |
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the combined sorted values of ``nums1`` and ``nums2``.

    >>> median_of_two_arrays([1, 2], [3])
    2
    >>> median_of_two_arrays([1.5], [2.5, 3.5])
    2.5
    """
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
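
    # Cross-check against the standard library (illustrative addition, not part of
    # the original script): statistics.median uses the same mid-point convention.
    import statistics

    assert median_of_two_arrays(array_1, array_2) == statistics.median(array_1 + array_2)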
| 489 | 0 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    """Wraps an MCTCT feature extractor and a tokenizer into a single processor."""

    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 695 |
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
lowercase_ = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def lowercase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any] ) -> Dict:
if got_ver is None or want_ver is None:
raise ValueError(
f'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
f''' reinstalling {pkg}.''' )
if not ops[op](version.parse(lowerCAmelCase__ ) , version.parse(lowerCAmelCase__ ) ):
raise ImportError(
f'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> None:
__a = f'''\n{hint}''' if hint is not None else ''''''
# non-versioned check
if re.match(r'''^[\w_\-\d]+$''' , lowerCAmelCase__ ):
__a , __a , __a = requirement, None, None
else:
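        # split e.g. "package_a>=1.23,<2.0" into the package name and its raw version specifiers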
__a = re.findall(r'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''' , lowerCAmelCase__ )
if not match:
raise ValueError(
            '''requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but'''
f''' got {requirement}''' )
__a , __a = match[0]
__a = want_full.split(''',''' ) # there could be multiple requirements
__a = {}
for w in want_range:
__a = re.findall(r'''^([\s!=<>]{1,2})(.+)''' , lowerCAmelCase__ )
if not match:
raise ValueError(
                '''requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,'''
f''' but got {requirement}''' )
__a , __a = match[0]
__a = want_ver
if op not in ops:
raise ValueError(f'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )
# special case
if pkg == "python":
__a = '''.'''.join([str(lowerCAmelCase__ ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return
# check if any version is installed
try:
__a = importlib.metadata.version(lowerCAmelCase__ )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : Tuple ) -> Optional[Any]:
__a = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
return require_version(lowerCAmelCase__ , lowerCAmelCase__ )
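# Illustrative requirement strings this parser accepts (version pins are hypothetical):
#   "tokenizers"                        - bare package name, any installed version passes
#   "datasets>=2.0.0"                   - single version specifier
#   "tokenizers>=0.11.1,!=0.11.3,<0.13" - multiple comma-separated specifiers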
| 695 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def _snake_case ( lowerCamelCase__ : Callable[[int | float], int | float] , lowerCamelCase__ : int | float , lowerCamelCase__ : int | float , lowerCamelCase__ : int = 100 , ) -> float:
lowerCamelCase_ : Dict =x_start
lowerCamelCase_ : Union[str, Any] =fnc(lowerCamelCase__ )
lowerCamelCase_ : Dict =0.0
for _ in range(lowerCamelCase__ ):
# Approximates curve as a sequence of linear lines and sums their length
lowerCamelCase_ : List[Any] =(x_end - x_start) / steps + xa
lowerCamelCase_ : Optional[int] =fnc(lowerCamelCase__ )
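        # each segment contributes the straight-line distance between its two endpoints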
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
lowerCamelCase_ : Optional[Any] =xa
lowerCamelCase_ : List[str] =fxa
return length
if __name__ == "__main__":
def _snake_case ( lowerCamelCase__ : Optional[int] ) -> str:
return math.sin(10 * x )
print('f(x) = sin(10 * x)')
print('The length of the curve from x = -10 to x = 10 is:')
A__ : str = 10
while i <= 100_000:
print(f'With {i} steps: {line_length(f, -10, 10, i)}')
i *= 10
| 700 |
"""simple docstring"""
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
A__ : int = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
A__ : Dict = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
A__ : Optional[int] = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def _snake_case ( lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[Any] ) -> List[Any]:
return float((preds == labels).mean() )
def _snake_case ( lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Any , lowerCamelCase__ : Tuple="binary" ) -> Tuple:
lowerCamelCase_ : Optional[int] =simple_accuracy(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ : Optional[Any] =float(fa_score(y_true=lowerCamelCase__ , y_pred=lowerCamelCase__ , average=lowerCamelCase__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def _snake_case ( lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] ) -> int:
lowerCamelCase_ : Optional[int] ={}
for id_pred, label in zip(lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ : Union[str, Any] =F"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"""
lowerCamelCase_ : Any =id_pred["prediction"]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
lowerCamelCase_ : int =[(pred, label)]
lowerCamelCase_ , lowerCamelCase_ : Optional[int] =[], []
for question, preds_labels in question_map.items():
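        # unzip the (prediction, label) pairs so per-question metrics can be computed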
lowerCamelCase_ , lowerCamelCase_ : Dict =zip(*lowerCamelCase__ )
lowerCamelCase_ : Any =fa_score(y_true=lowerCamelCase__ , y_pred=lowerCamelCase__ , average="macro" )
fas.append(lowerCamelCase__ )
lowerCamelCase_ : Union[str, Any] =int(sum(pred == label for pred, label in preds_labels ) == len(lowerCamelCase__ ) )
ems.append(lowerCamelCase__ )
lowerCamelCase_ : Any =float(sum(lowerCamelCase__ ) / len(lowerCamelCase__ ) )
lowerCamelCase_ : List[Any] =sum(lowerCamelCase__ ) / len(lowerCamelCase__ )
lowerCamelCase_ : List[str] =float(fa_score(y_true=lowerCamelCase__ , y_pred=[id_pred["prediction"] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowercase__ ( datasets.Metric ):
def UpperCAmelCase__ ( self : Union[str, Any] ):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , )
def UpperCAmelCase__ ( self : int ):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"prediction_text": datasets.Value("string" ),
},
"references": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"answers": datasets.Sequence(datasets.Value("string" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64" ),
"paragraph": datasets.Value("int64" ),
"question": datasets.Value("int64" ),
},
"prediction": datasets.Value("int64" ),
},
"references": datasets.Value("int64" ),
}
else:
return {
"predictions": datasets.Value("int64" ),
"references": datasets.Value("int64" ),
}
def UpperCAmelCase__ ( self : Dict , snake_case__ : Optional[int] , snake_case__ : Dict ):
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(snake_case__ , snake_case__ )}
elif self.config_name == "cb":
return acc_and_fa(snake_case__ , snake_case__ , fa_avg="macro" )
elif self.config_name == "record":
lowerCamelCase_ : List[Any] =[
{
"qas": [
{"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
for ref in references
]
}
]
lowerCamelCase_ : str ={pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
return evaluate_record(snake_case__ , snake_case__ )[0]
elif self.config_name == "multirc":
return evaluate_multirc(snake_case__ , snake_case__ )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(snake_case__ , snake_case__ )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
| 244 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=_lowerCAmelCase )
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
__snake_case :ClassVar[Features] = Features({'audio': Audio()} )
__snake_case :ClassVar[Features] = Features({'labels': ClassLabel} )
__snake_case :str = "audio"
__snake_case :str = "labels"
def _a ( self : Any , _lowerCAmelCase : Dict ) -> Union[str, Any]:
"""simple docstring"""
if self.label_column not in features:
raise ValueError(F'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , _lowerCAmelCase ):
raise ValueError(F'Column {self.label_column} is not a ClassLabel.' )
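        # return a copy of the template whose label schema uses the dataset's actual ClassLabel feature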
__lowercase = copy.deepcopy(self )
__lowercase = self.label_schema.copy()
__lowercase = features[self.label_column]
__lowercase = label_schema
return task_template
@property
def _a ( self : Dict ) -> Dict[str, str]:
"""simple docstring"""
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 80 |
"""simple docstring"""
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( _UpperCamelCase ,unittest.TestCase ):
UpperCamelCase : Optional[Any] = LxmertTokenizer
UpperCamelCase : str = LxmertTokenizerFast
UpperCamelCase : Optional[int] = True
UpperCamelCase : Optional[int] = True
def _lowerCamelCase ( self ):
"""simple docstring"""
super().setUp()
_lowerCAmelCase = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def _lowerCamelCase ( self , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = 'UNwant\u00E9d,running'
_lowerCAmelCase = 'unwanted, running'
return input_text, output_text
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.tokenizer_class(self.vocab_file )
_lowerCAmelCase = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(__magic_name__ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , [7, 4, 5, 1_0, 8, 9] )
def _lowerCamelCase ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = self.get_rust_tokenizer()
_lowerCAmelCase = 'I was born in 92000, and this is falsé.'
_lowerCAmelCase = tokenizer.tokenize(__magic_name__ )
_lowerCAmelCase = rust_tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
_lowerCAmelCase = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
_lowerCAmelCase = rust_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
_lowerCAmelCase = self.get_rust_tokenizer()
_lowerCAmelCase = tokenizer.encode(__magic_name__ )
_lowerCAmelCase = rust_tokenizer.encode(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
| 589 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : Dict = logging.get_logger(__name__)
lowerCAmelCase__ : Tuple = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""}
class a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = """ctrl"""
__UpperCAmelCase = ["""past_key_values"""]
__UpperCAmelCase = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Tuple , snake_case_ : Dict=2_4_6_5_3_4 , snake_case_ : Optional[int]=2_5_6 , snake_case_ : Dict=1_2_8_0 , snake_case_ : Union[str, Any]=8_1_9_2 , snake_case_ : Any=4_8 , snake_case_ : List[Any]=1_6 , snake_case_ : Optional[Any]=0.1 , snake_case_ : Union[str, Any]=0.1 , snake_case_ : Optional[Any]=1e-6 , snake_case_ : List[Any]=0.0_2 , snake_case_ : Dict=True , **snake_case_ : List[Any] , ):
'''simple docstring'''
snake_case__ : Any = vocab_size
snake_case__ : int = n_positions
snake_case__ : Optional[int] = n_embd
snake_case__ : str = n_layer
snake_case__ : Any = n_head
snake_case__ : str = dff
snake_case__ : Any = resid_pdrop
snake_case__ : Tuple = embd_pdrop
snake_case__ : List[str] = layer_norm_epsilon
snake_case__ : int = initializer_range
snake_case__ : Optional[int] = use_cache
super().__init__(**snake_case_ )
| 502 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = None
class a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = 2
@register_to_config
def __init__( self : int , snake_case_ : float = 0.0_2 , snake_case_ : float = 1_0_0 , snake_case_ : float = 1.0_0_7 , snake_case_ : float = 8_0 , snake_case_ : float = 0.0_5 , snake_case_ : float = 5_0 , ):
'''simple docstring'''
snake_case__ : List[Any] = sigma_max
# setable values
snake_case__ : int = None
snake_case__ : np.IntTensor = None
snake_case__ : torch.FloatTensor = None # sigma(t_i)
def __magic_name__ ( self : Any , snake_case_ : torch.FloatTensor , snake_case_ : Optional[int] = None ):
'''simple docstring'''
return sample
def __magic_name__ ( self : Optional[int] , snake_case_ : int , snake_case_ : Union[str, torch.device] = None ):
'''simple docstring'''
snake_case__ : Union[str, Any] = num_inference_steps
snake_case__ : Tuple = np.arange(0 , self.num_inference_steps )[::-1].copy()
snake_case__ : int = torch.from_numpy(snake_case_ ).to(snake_case_ )
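        # Karras-style schedule: sigma**2 is interpolated geometrically from sigma_max**2 down to sigma_min**2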
snake_case__ : List[str] = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
snake_case__ : Union[str, Any] = torch.tensor(snake_case_ , dtype=torch.floataa , device=snake_case_ )
def __magic_name__ ( self : Optional[int] , snake_case_ : torch.FloatTensor , snake_case_ : float , snake_case_ : Optional[torch.Generator] = None ):
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
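            # inject stochastic "churn" only while sigma lies inside the configured [s_min, s_max] band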
snake_case__ : List[Any] = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
snake_case__ : List[Any] = 0
# sample eps ~ N(0, S_noise^2 * I)
snake_case__ : Any = self.config.s_noise * randn_tensor(sample.shape , generator=snake_case_ ).to(sample.device )
snake_case__ : Tuple = sigma + gamma * sigma
snake_case__ : Dict = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __magic_name__ ( self : Optional[int] , snake_case_ : torch.FloatTensor , snake_case_ : float , snake_case_ : float , snake_case_ : torch.FloatTensor , snake_case_ : bool = True , ):
'''simple docstring'''
snake_case__ : Union[str, Any] = sample_hat + sigma_hat * model_output
snake_case__ : List[Any] = (sample_hat - pred_original_sample) / sigma_hat
snake_case__ : Any = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=snake_case_ , derivative=snake_case_ , pred_original_sample=snake_case_ )
def __magic_name__ ( self : Tuple , snake_case_ : torch.FloatTensor , snake_case_ : float , snake_case_ : float , snake_case_ : torch.FloatTensor , snake_case_ : torch.FloatTensor , snake_case_ : torch.FloatTensor , snake_case_ : bool = True , ):
'''simple docstring'''
snake_case__ : str = sample_prev + sigma_prev * model_output
snake_case__ : List[Any] = (sample_prev - pred_original_sample) / sigma_prev
snake_case__ : Any = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=snake_case_ , derivative=snake_case_ , pred_original_sample=snake_case_ )
def __magic_name__ ( self : str , snake_case_ : List[Any] , snake_case_ : Optional[int] , snake_case_ : Tuple ):
'''simple docstring'''
raise NotImplementedError()
| 502 | 1 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
SCREAMING_SNAKE_CASE = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self : str , UpperCAmelCase : str , UpperCAmelCase : bool , UpperCAmelCase : str = None , UpperCAmelCase : list = None ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[Any] =None
lowercase : Any =os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
lowercase : List[str] =os.path.abspath('''examples''' )
for item in os.listdir(UpperCAmelCase ):
if item not in EXCLUDE_EXAMPLES:
lowercase : Union[str, Any] =os.path.join(UpperCAmelCase , UpperCAmelCase )
if os.path.isfile(UpperCAmelCase ) and ".py" in item_path:
with self.subTest(
tested_script=UpperCAmelCase , feature_script=UpperCAmelCase , tested_section='''main()''' if parser_only else '''training_function()''' , ):
lowercase : List[Any] =compare_against_test(
os.path.join(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase : Optional[int] ='''\n'''.join(UpperCAmelCase )
if special_strings is not None:
for string in special_strings:
lowercase : Optional[Any] =diff.replace(UpperCAmelCase , '''''' )
self.assertEqual(UpperCAmelCase , '''''' )
def A__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
self.one_complete_example('''complete_nlp_example.py''' , UpperCAmelCase )
self.one_complete_example('''complete_nlp_example.py''' , UpperCAmelCase )
def A__ ( self : str ) -> Any:
'''simple docstring'''
lowercase : str =os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
lowercase : str =[
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
self.one_complete_example('''complete_cv_example.py''' , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = False
@classmethod
def A__ ( cls : Optional[Any] ) -> List[Any]:
'''simple docstring'''
super().setUpClass()
lowercase : List[str] =tempfile.mkdtemp()
lowercase : Union[str, Any] =os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
lowercase : Any =['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def A__ ( cls : Optional[Any] ) -> Tuple:
'''simple docstring'''
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def A__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
lowercase : Dict =f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def A__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Dict =f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split()
lowercase : str =run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
lowercase : Optional[int] =f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n '.split()
lowercase : Any =run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase )
self.assertNotIn('''epoch 0:''' , UpperCAmelCase )
self.assertIn('''epoch 1:''' , UpperCAmelCase )
def A__ ( self : str ) -> List[Any]:
'''simple docstring'''
lowercase : List[str] =f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n '.split()
lowercase : Tuple =run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase )
if torch.cuda.is_available():
lowercase : List[Any] =torch.cuda.device_count()
else:
lowercase : Dict =1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , UpperCAmelCase )
self.assertIn('''epoch 1:''' , UpperCAmelCase )
else:
self.assertIn('''epoch 0:''' , UpperCAmelCase )
self.assertIn('''epoch 1:''' , UpperCAmelCase )
@slow
def A__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
lowercase : List[Any] ='''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
lowercase : List[str] =run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase )
lowercase : List[Any] =re.findall('''({.+})''' , UpperCAmelCase )
lowercase : Tuple =[r for r in results if '''accuracy''' in r][-1]
lowercase : Union[str, Any] =ast.literal_eval(UpperCAmelCase )
self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 )
def A__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Any =['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def A__ ( self : int ) -> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
lowercase : Tuple =f'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase , '''tracking''' ) ) )
def A__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def A__ ( self : Dict ) -> Tuple:
'''simple docstring'''
lowercase : List[str] =['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 94 |
def lowercase_ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
snake_case__ : List[Any] =word.split()
def justify(SCREAMING_SNAKE_CASE : list , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> str:
snake_case__ : Optional[Any] =max_width - width
snake_case__ : Tuple =len(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) == 1:
            # if there is only one word in the line,
            # just insert overall_spaces_count spaces for the remainder of the line
return line[0] + " " * overall_spaces_count
else:
snake_case__ : List[Any] =words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
snake_case__ : Tuple =spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
snake_case__ : str =(
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(SCREAMING_SNAKE_CASE ):
num_spaces_between_words_list[i] += 1
snake_case__ : str =[]
for i in range(SCREAMING_SNAKE_CASE ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ''' ''' )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] =[]
snake_case__ : list[str] =[]
snake_case__ : Dict =0
for word in words:
if width + len(SCREAMING_SNAKE_CASE ) + len(SCREAMING_SNAKE_CASE ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(SCREAMING_SNAKE_CASE )
width += len(SCREAMING_SNAKE_CASE )
else:
# justify the line and add it to result
answer.append(justify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
# reset new line and new width
snake_case__, snake_case__ : Tuple =[word], len(SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] =max_width - width - len(SCREAMING_SNAKE_CASE )
answer.append(''' '''.join(SCREAMING_SNAKE_CASE ) + (remaining_spaces + 1) * ''' ''' )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 381 | 0 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class __magic_name__ ( lowercase_ ):
"""simple docstring"""
def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=True , a__=False , a__=True , a__=99 , a__=32 , a__=5 , a__=4 , a__=64 , a__="gelu" , a__=0.1 , a__=0.1 , a__=5_12 , a__=16 , a__=2 , a__=0.02 , a__=3 , a__=4 , a__=None , a__=2 , a__=2 , a__=2 , a__=2 , a__=4 , a__=1 , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_input_mask
_lowerCamelCase = use_token_type_ids
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = type_vocab_size
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = num_labels
_lowerCamelCase = num_choices
_lowerCamelCase = scope
_lowerCamelCase = q_groups
_lowerCamelCase = k_groups
_lowerCamelCase = v_groups
_lowerCamelCase = post_attention_groups
_lowerCamelCase = intermediate_groups
_lowerCamelCase = output_groups
def _UpperCAmelCase ( self ):
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase = None
if self.use_input_mask:
_lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase = None
_lowerCamelCase = None
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_lowerCamelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ):
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def _UpperCAmelCase ( self , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCamelCase = SqueezeBertModel(config=a__ )
model.to(a__ )
model.eval()
_lowerCamelCase = model(a__ , a__ )
_lowerCamelCase = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCamelCase = SqueezeBertForMaskedLM(config=a__ )
model.to(a__ )
model.eval()
_lowerCamelCase = model(a__ , attention_mask=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCamelCase = SqueezeBertForQuestionAnswering(config=a__ )
model.to(a__ )
model.eval()
_lowerCamelCase = model(
a__ , attention_mask=a__ , start_positions=a__ , end_positions=a__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCamelCase = self.num_labels
_lowerCamelCase = SqueezeBertForSequenceClassification(a__ )
model.to(a__ )
model.eval()
_lowerCamelCase = model(a__ , attention_mask=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCamelCase = self.num_labels
_lowerCamelCase = SqueezeBertForTokenClassification(config=a__ )
model.to(a__ )
model.eval()
_lowerCamelCase = model(a__ , attention_mask=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCamelCase = self.num_choices
_lowerCamelCase = SqueezeBertForMultipleChoice(config=a__ )
model.to(a__ )
model.eval()
_lowerCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase = model(
a__ , attention_mask=a__ , labels=a__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
((_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase)) = config_and_inputs
_lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __magic_name__ ( lowercase_ ,lowercase_ ,unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
_UpperCamelCase = (
{
"feature-extraction": SqueezeBertModel,
"fill-mask": SqueezeBertForMaskedLM,
"question-answering": SqueezeBertForQuestionAnswering,
"text-classification": SqueezeBertForSequenceClassification,
"token-classification": SqueezeBertForTokenClassification,
"zero-shot": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase = False
_UpperCamelCase = True
_UpperCamelCase = False
def _UpperCAmelCase ( self ):
_lowerCamelCase = SqueezeBertModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=a__ , dim=37 )
def _UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*a__ )
def _UpperCAmelCase ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*a__ )
def _UpperCAmelCase ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*a__ )
def _UpperCAmelCase ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*a__ )
def _UpperCAmelCase ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*a__ )
def _UpperCAmelCase ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*a__ )
@slow
def _UpperCAmelCase ( self ):
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase = SqueezeBertModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@require_sentencepiece
@require_tokenizers
@require_torch
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _UpperCAmelCase ( self ):
_lowerCamelCase = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' )
_lowerCamelCase = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]] )
_lowerCamelCase = model(a__ )[0]
_lowerCamelCase = torch.Size((1, 3) )
self.assertEqual(output.shape , a__ )
_lowerCamelCase = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(a__ , a__ , atol=1E-4 ) )
| 711 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
def _lowerCamelCase ( _a , _a , _a ):
"""simple docstring"""
_lowerCamelCase = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(_a ) == len(_a ), F'''{len(_a )} != {len(_a )}'''
dest_layers.load_state_dict(layers_to_copy.state_dict() )
_UpperCAmelCase = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
_UpperCAmelCase = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def _lowerCamelCase ( _a , _a ):
"""simple docstring"""
try:
_lowerCamelCase = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F'''no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'''
F''' {n_student}''' )
return list(range(_a ) )
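# e.g. distilling a 12-layer teacher into a 3-layer student copies teacher layers [0, 6, 11] (see LAYERS_TO_COPY above)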
def _lowerCamelCase ( _a , _a ):
"""simple docstring"""
if n_student > n_teacher:
raise ValueError(F'''Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}''' )
elif n_teacher == n_student:
return list(range(_a ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def _lowerCamelCase ( _a , _a = "student" , _a = None , _a = None , _a=False , _a=None , _a=None , **_a , ):
"""simple docstring"""
    _lowerCamelCase = '''encoder_layers and decoder_layers cannot both be None -- you would just have an identical teacher.'''
assert (e is not None) or (d is not None), _msg
if isinstance(_a , _a ):
AutoTokenizer.from_pretrained(_a ).save_pretrained(_a ) # purely for convenience
_lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained(_a ).eval()
else:
assert isinstance(_a , _a ), F'''teacher must be a model or string got type {type(_a )}'''
_lowerCamelCase = teacher.config.to_diff_dict()
try:
_lowerCamelCase , _lowerCamelCase = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
_lowerCamelCase = teacher_e
if d is None:
_lowerCamelCase = teacher_d
init_kwargs.update({'''encoder_layers''': e, '''decoder_layers''': d} )
except AttributeError: # T5
if hasattr(teacher.config , '''num_encoder_layers''' ):
_lowerCamelCase , _lowerCamelCase = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
_lowerCamelCase , _lowerCamelCase = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
_lowerCamelCase = teacher_e
if d is None:
_lowerCamelCase = teacher_d
if hasattr(teacher.config , '''num_encoder_layers''' ):
init_kwargs.update({'''num_encoder_layers''': e, '''num_decoder_layers''': d} )
else:
init_kwargs.update({'''num_layers''': e, '''num_decoder_layers''': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(_a )
# Copy weights
_lowerCamelCase = teacher.config_class(**_a )
_lowerCamelCase = AutoModelForSeqaSeqLM.from_config(_a )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
_lowerCamelCase = student.load_state_dict(teacher.state_dict() , strict=_a )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
_lowerCamelCase , _lowerCamelCase = list(range(_a ) ), list(range(_a ) )
logger.info(
F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'''
F''' {save_path}''' )
student.save_pretrained(_a )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
_lowerCamelCase = pick_layers_to_copy(_a , _a )
if d_layers_to_copy is None:
_lowerCamelCase = pick_layers_to_copy(_a , _a )
try:
if hasattr(
_a , '''prophetnet''' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , _a )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , _a )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , _a )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , _a )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , _a )
copy_layers(teacher.decoder.block , student.decoder.block , _a )
logger.info(
F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}''' )
_lowerCamelCase = {
'''teacher_type''': teacher.config.model_type,
'''copied_encoder_layers''': e_layers_to_copy,
'''copied_decoder_layers''': d_layers_to_copy,
}
student.save_pretrained(_a )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 297 | 0 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def _A ( _lowerCAmelCase ):
"""simple docstring"""
random.seed(_lowerCAmelCase )
np.random.seed(_lowerCAmelCase )
torch.manual_seed(_lowerCAmelCase )
torch.cuda.manual_seed_all(_lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , _lowerCAmelCase : Iterable[torch.nn.Parameter] , _lowerCAmelCase : float = 0.9999 , _lowerCAmelCase : float = 0.0 , _lowerCAmelCase : int = 0 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Union[float, int] = 1.0 , _lowerCAmelCase : Union[float, int] = 2 / 3 , _lowerCAmelCase : Optional[Any] = None , _lowerCAmelCase : Dict[str, Any] = None , **_lowerCAmelCase : Tuple , ):
'''simple docstring'''
if isinstance(_lowerCAmelCase , torch.nn.Module):
__lowercase =(
'Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '
'Please pass the parameters of the module instead.'
)
deprecate(
'passing a `torch.nn.Module` to `ExponentialMovingAverage`' , '1.0.0' , _lowerCAmelCase , standard_warn=_lowerCAmelCase , )
__lowercase =parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
__lowercase =True
if kwargs.get('max_value' , _lowerCAmelCase) is not None:
__lowercase ='The `max_value` argument is deprecated. Please use `decay` instead.'
deprecate('max_value' , '1.0.0' , _lowerCAmelCase , standard_warn=_lowerCAmelCase)
__lowercase =kwargs['max_value']
if kwargs.get('min_value' , _lowerCAmelCase) is not None:
__lowercase ='The `min_value` argument is deprecated. Please use `min_decay` instead.'
deprecate('min_value' , '1.0.0' , _lowerCAmelCase , standard_warn=_lowerCAmelCase)
__lowercase =kwargs['min_value']
__lowercase =list(_lowerCAmelCase)
__lowercase =[p.clone().detach() for p in parameters]
if kwargs.get('device' , _lowerCAmelCase) is not None:
__lowercase ='The `device` argument is deprecated. Please use `to` instead.'
deprecate('device' , '1.0.0' , _lowerCAmelCase , standard_warn=_lowerCAmelCase)
self.to(device=kwargs['device'])
__lowercase =None
__lowercase =decay
__lowercase =min_decay
__lowercase =update_after_step
__lowercase =use_ema_warmup
__lowercase =inv_gamma
__lowercase =power
__lowercase =0
__lowercase =None # set in `step()`
__lowercase =model_cls
__lowercase =model_config
@classmethod
def __lowerCamelCase ( cls : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any]):
'''simple docstring'''
__lowercase , __lowercase =model_cls.load_config(_lowerCAmelCase , return_unused_kwargs=_lowerCAmelCase)
__lowercase =model_cls.from_pretrained(_lowerCAmelCase)
__lowercase =cls(model.parameters() , model_cls=_lowerCAmelCase , model_config=model.config)
ema_model.load_state_dict(_lowerCAmelCase)
return ema_model
def __lowerCamelCase ( self : Dict , _lowerCAmelCase : Optional[int]):
'''simple docstring'''
if self.model_cls is None:
raise ValueError('`save_pretrained` can only be used if `model_cls` was defined at __init__.')
if self.model_config is None:
raise ValueError('`save_pretrained` can only be used if `model_config` was defined at __init__.')
__lowercase =self.model_cls.from_config(self.model_config)
__lowercase =self.state_dict()
state_dict.pop('shadow_params' , _lowerCAmelCase)
model.register_to_config(**_lowerCAmelCase)
self.copy_to(model.parameters())
model.save_pretrained(_lowerCAmelCase)
def __lowerCamelCase ( self : Union[str, Any] , _lowerCAmelCase : int):
'''simple docstring'''
__lowercase =max(0 , optimization_step - self.update_after_step - 1)
if step <= 0:
return 0.0
if self.use_ema_warmup:
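            # power-law warmup: the effective decay ramps from 0 toward self.decay as optimization proceeds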
__lowercase =1 - (1 + step / self.inv_gamma) ** -self.power
else:
__lowercase =(1 + step) / (1_0 + step)
__lowercase =min(_lowerCAmelCase , self.decay)
# make sure decay is not smaller than min_decay
__lowercase =max(_lowerCAmelCase , self.min_decay)
return cur_decay_value
@torch.no_grad()
def __lowerCamelCase ( self : Optional[Any] , _lowerCAmelCase : Iterable[torch.nn.Parameter]):
'''simple docstring'''
if isinstance(_lowerCAmelCase , torch.nn.Module):
__lowercase =(
'Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '
'Please pass the parameters of the module instead.'
)
deprecate(
'passing a `torch.nn.Module` to `ExponentialMovingAverage.step`' , '1.0.0' , _lowerCAmelCase , standard_warn=_lowerCAmelCase , )
__lowercase =parameters.parameters()
__lowercase =list(_lowerCAmelCase)
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
__lowercase =self.get_decay(self.optimization_step)
__lowercase =decay
__lowercase =1 - decay
__lowercase =contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , _lowerCAmelCase):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
__lowercase =deepspeed.zero.GatheredParameters(_lowerCAmelCase , modifier_rank=_lowerCAmelCase)
with context_manager():
if param.requires_grad:
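                    # in-place EMA update: shadow <- shadow - (1 - decay) * (shadow - param), i.e. decay * shadow + (1 - decay) * param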
s_param.sub_(one_minus_decay * (s_param - param))
else:
s_param.copy_(_lowerCAmelCase)
def __lowerCamelCase ( self : Any , _lowerCAmelCase : Iterable[torch.nn.Parameter]):
'''simple docstring'''
__lowercase =list(_lowerCAmelCase)
for s_param, param in zip(self.shadow_params , _lowerCAmelCase):
param.data.copy_(s_param.to(param.device).data)
def __lowerCamelCase ( self : Union[str, Any] , _lowerCAmelCase : str=None , _lowerCAmelCase : Optional[Any]=None):
'''simple docstring'''
__lowercase =[
p.to(device=_lowerCAmelCase , dtype=_lowerCAmelCase) if p.is_floating_point() else p.to(device=_lowerCAmelCase)
for p in self.shadow_params
]
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def __lowerCamelCase ( self : Optional[Any] , _lowerCAmelCase : Iterable[torch.nn.Parameter]):
'''simple docstring'''
__lowercase =[param.detach().cpu().clone() for param in parameters]
def __lowerCamelCase ( self : int , _lowerCAmelCase : Iterable[torch.nn.Parameter]):
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError('This ExponentialMovingAverage has no `store()`ed weights ' 'to `restore()`')
for c_param, param in zip(self.temp_stored_params , _lowerCAmelCase):
param.data.copy_(c_param.data)
# Better memory-wise.
__lowercase =None
def __lowerCamelCase ( self : Optional[int] , _lowerCAmelCase : dict):
'''simple docstring'''
__lowercase =copy.deepcopy(_lowerCAmelCase)
__lowercase =state_dict.get('decay' , self.decay)
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('Decay must be between 0 and 1')
__lowercase =state_dict.get('min_decay' , self.min_decay)
if not isinstance(self.min_decay , _lowerCAmelCase):
raise ValueError('Invalid min_decay')
__lowercase =state_dict.get('optimization_step' , self.optimization_step)
if not isinstance(self.optimization_step , _lowerCAmelCase):
raise ValueError('Invalid optimization_step')
__lowercase =state_dict.get('update_after_step' , self.update_after_step)
if not isinstance(self.update_after_step , _lowerCAmelCase):
raise ValueError('Invalid update_after_step')
__lowercase =state_dict.get('use_ema_warmup' , self.use_ema_warmup)
if not isinstance(self.use_ema_warmup , _lowerCAmelCase):
raise ValueError('Invalid use_ema_warmup')
__lowercase =state_dict.get('inv_gamma' , self.inv_gamma)
if not isinstance(self.inv_gamma , (float, int)):
raise ValueError('Invalid inv_gamma')
__lowercase =state_dict.get('power' , self.power)
if not isinstance(self.power , (float, int)):
raise ValueError('Invalid power')
__lowercase =state_dict.get('shadow_params' , _lowerCAmelCase)
if shadow_params is not None:
__lowercase =shadow_params
if not isinstance(self.shadow_params , _lowerCAmelCase):
raise ValueError('shadow_params must be a list')
if not all(isinstance(_lowerCAmelCase , torch.Tensor) for p in self.shadow_params):
raise ValueError('shadow_params must all be Tensors')
| 474 |
'''simple docstring'''
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class _UpperCamelCase ( A , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = DownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
__lowercase =[-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(_lowerCAmelCase)
class _UpperCamelCase ( A , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = ResnetDownsampleBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def __lowerCamelCase ( self : int):
'''simple docstring'''
__lowercase =[0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(_lowerCAmelCase)
class _UpperCamelCase ( A , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
__lowercase =[0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(_lowerCAmelCase)
class _UpperCamelCase ( A , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = CrossAttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def __lowerCamelCase ( self : Any):
'''simple docstring'''
__lowercase , __lowercase =super().prepare_init_args_and_inputs_for_common()
__lowercase =3_2
return init_dict, inputs_dict
def __lowerCamelCase ( self : int):
'''simple docstring'''
__lowercase =[0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(_lowerCAmelCase)
class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa: F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)


class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa: F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa: F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)
class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa: F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)


class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa: F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)
class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa: F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)


class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa: F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)


class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa: F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)
class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa: F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa: F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa: F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa: F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)
class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa: F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa: F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa: F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)
class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa: F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa: F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
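# A minimal sketch of exercising one of these blocks directly, outside the
# tester mixin. The constructor arguments and tensor shapes below are
# assumptions for illustration; the mixin supplies equivalent dummy shapes.
import torch
from diffusers.models.unet_2d_blocks import DownBlock2D

block = DownBlock2D(in_channels=32, out_channels=32, temb_channels=128)
sample = torch.randn(4, 32, 32, 32)  # (batch, channels, height, width)
temb = torch.randn(4, 128)           # timestep embedding
hidden_states, output_states = block(sample, temb)
print(hidden_states.shape)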
| 474 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : str = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'deit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'deit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'deit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'deit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'deit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'deit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'deit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'deit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'deit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'deit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
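# Once the conversion has run, the exported checkpoint can be loaded back with
# the standard transformers API; the folder path below is a placeholder.
from transformers import DeiTForImageClassificationWithTeacher, DeiTImageProcessor

model = DeiTForImageClassificationWithTeacher.from_pretrained("path/to/pytorch_dump_folder")
processor = DeiTImageProcessor.from_pretrained("path/to/pytorch_dump_folder")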
| 604 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def UpperCAmelCase_ ( snake_case__="" ) -> str:
"""simple docstring"""
lowerCAmelCase__ = tempfile.mkdtemp()
return os.path.join(snake_case__ , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, sr = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)
        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
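# A minimal sketch of what these agent types are for: wrapping tool outputs so
# they can be rendered as text or accessed as raw objects interchangeably.
from transformers.tools.agent_types import AgentText

result = AgentText("a generated caption")
print(result.to_string())  # plain string form
print(result.to_raw())     # underlying object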
| 604 | 1 |
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(fnc: Callable[[float], float], x_start: float, x_end: float, steps: int = 100) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
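    # Sanity check: because the summation uses abs(), the routine approximates
    # the unsigned area. Splitting at the sign changes of f(x) = x^3 + x^2 and
    # using F(x) = x^4/4 + x^3/3, the exact value is 1376/12 + 2376/12 ≈ 312.667,
    # which the printed approximations above converge towards.
    exact = 1376 / 12 + 2376 / 12
    print(f"exact (unsigned) area: {exact:.3f}")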
| 74 |
import os
import random
import sys

from . import cryptomath_module as cryptoMath  # noqa: N812
from . import rabin_miller as rabinMiller  # noqa: N812


def main():
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int):
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int):
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


if __name__ == "__main__":
    main()
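# A quick illustration of why (n, e) / (n, d) form a working key pair: modular
# exponentiation with e encrypts, and d inverts it. Tiny textbook numbers only;
# real keys use the large primes generated above.
p, q = 61, 53
n = p * q                           # 3233
e = 17                              # coprime with (p - 1) * (q - 1) = 3120
d = pow(e, -1, (p - 1) * (q - 1))   # modular inverse, d = 2753
message = 65
ciphertext = pow(message, e, n)     # encrypt: 2790
assert pow(ciphertext, d, n) == message  # decrypt recovers 65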
| 256 | 0 |
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
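# Worked example for the scores above (height = 3, maximiser moves first):
# depth 2 (max): max(90, 23) = 90, max(6, 33) = 33, max(21, 65) = 65, max(123, 34423) = 34423
# depth 1 (min): min(90, 33) = 33, min(65, 34423) = 65
# depth 0 (max): max(33, 65) = 65, so main() prints "Optimal value : 65".
assert minimax(0, 0, True, [90, 23, 6, 33, 21, 65, 123, 34423], 3) == 65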
| 75 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    config_mapping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_mapping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_mapping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"

    return table
def check_model_table(overwrite=False):
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
__A : int = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
__A : Any = parser.parse_args()
check_model_table(args.fix_and_overwrite)
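# _center_text is the piece that keeps the generated markdown table aligned; a
# quick standalone check of its padding behaviour (emoji count as width 2):
assert _center_text("abc", 9) == "   abc   "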
| 75 | 1 |
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device

enable_full_determinism()


class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
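# The same pipeline outside the test harness; this mirrors the integration test
# above and assumes the "google/ncsnpp-celebahq-256" weights are available.
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
image = pipe(num_inference_steps=20, generator=torch.manual_seed(0)).images[0]
image.save("sample.png")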
| 696 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)

        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
    @require_multi_gpu
    def test_align_devices(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
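# A minimal sketch of writing and attaching a custom hook with the same API the
# tests above exercise: pre_forward can rewrite inputs, post_forward the output.
import torch
import torch.nn as nn
from accelerate.hooks import ModelHook, add_hook_to_module

class ScaleOutputHook(ModelHook):
    def post_forward(self, module, output):
        return output * 2  # double whatever the wrapped module returns

layer = nn.Linear(3, 3)
add_hook_to_module(layer, ScaleOutputHook())
print(layer(torch.ones(1, 3)))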
| 21 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_xlm_roberta": [
        "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaConfig",
        "XLMRobertaOnnxConfig",
    ],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["""XLMRobertaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["""XLMRobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
"""XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaForCausalLM""",
"""XLMRobertaForMaskedLM""",
"""XLMRobertaForMultipleChoice""",
"""XLMRobertaForQuestionAnswering""",
"""XLMRobertaForSequenceClassification""",
"""XLMRobertaForTokenClassification""",
"""XLMRobertaModel""",
"""XLMRobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
"""TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMRobertaForCausalLM""",
"""TFXLMRobertaForMaskedLM""",
"""TFXLMRobertaForMultipleChoice""",
"""TFXLMRobertaForQuestionAnswering""",
"""TFXLMRobertaForSequenceClassification""",
"""TFXLMRobertaForTokenClassification""",
"""TFXLMRobertaModel""",
"""TFXLMRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
"""FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxXLMRobertaForMaskedLM""",
"""FlaxXLMRobertaForCausalLM""",
"""FlaxXLMRobertaForMultipleChoice""",
"""FlaxXLMRobertaForQuestionAnswering""",
"""FlaxXLMRobertaForSequenceClassification""",
"""FlaxXLMRobertaForTokenClassification""",
"""FlaxXLMRobertaModel""",
"""FlaxXLMRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
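# The point of the _LazyModule indirection above: importing the package is
# cheap, and a heavy backend is only loaded when its symbol is first touched.
# From a user's perspective:
import transformers

model_cls = transformers.XLMRobertaModel  # torch is imported here, not before
print(model_cls.__name__)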
| 611 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image):
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
| 611 | 1 |
"""simple docstring"""
from manim import *
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
def __A ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE = Rectangle(height=0.25 , width=0.25 )
SCREAMING_SNAKE_CASE = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
SCREAMING_SNAKE_CASE = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
SCREAMING_SNAKE_CASE = VGroup(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
SCREAMING_SNAKE_CASE = Text('CPU' , font_size=24 )
SCREAMING_SNAKE_CASE = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
SCREAMING_SNAKE_CASE = Text('GPU' , font_size=24 )
SCREAMING_SNAKE_CASE = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
SCREAMING_SNAKE_CASE = Text('Model' , font_size=24 )
SCREAMING_SNAKE_CASE = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
model.move_to([3, -1.0, 0] )
self.add(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for i, rect in enumerate(lowerCAmelCase__ ):
rect.set_stroke(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCAmelCase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCAmelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=lowerCAmelCase__ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=lowerCAmelCase__ , buff=0.0 )
self.add(lowerCAmelCase__ )
model_cpu_arr.append(lowerCAmelCase__ )
self.add(*lowerCAmelCase__ , *lowerCAmelCase__ , *lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
SCREAMING_SNAKE_CASE = Text('Loaded Checkpoint' , font_size=24 )
SCREAMING_SNAKE_CASE = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
checkpoint.move_to([3, 0.5, 0] )
self.add(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for i, rect in enumerate(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE = fill.copy().set_fill(lowerCAmelCase__ , opacity=0.7 )
target.move_to(lowerCAmelCase__ )
ckpt_arr.append(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(lowerCAmelCase__ )
self.add(*lowerCAmelCase__ , *lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(lowerCAmelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = MarkupText(
F'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
SCREAMING_SNAKE_CASE = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
SCREAMING_SNAKE_CASE = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
SCREAMING_SNAKE_CASE = VGroup(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
SCREAMING_SNAKE_CASE = Text('Disk' , font_size=24 )
SCREAMING_SNAKE_CASE = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(lowerCAmelCase__ , run_time=3 ) , Write(lowerCAmelCase__ , run_time=1 ) , Create(lowerCAmelCase__ , run_time=1 ) )
SCREAMING_SNAKE_CASE = []
for i, rect in enumerate(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(lowerCAmelCase__ , run_time=1.5 ) )
self.play(*lowerCAmelCase__ )
self.play(FadeOut(lowerCAmelCase__ ) )
SCREAMING_SNAKE_CASE = MarkupText(F'Then, the checkpoint is removed from memory\nthrough garbage collection.' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCAmelCase__ , run_time=3 ) )
self.play(
FadeOut(lowerCAmelCase__ , lowerCAmelCase__ , *lowerCAmelCase__ , *lowerCAmelCase__ ) , )
self.wait()
| 247 |
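For orientation, a tiny self-contained scene in the same style as the animation above — a row of "memory cell" rectangles with a label — assuming the Manim Community API; RIGHT and DOWN stand in for the obfuscated direction arguments:

from manim import DOWN, RIGHT, Group, Rectangle, Scene, Text, VGroup, Write

class MinimalMemoryDiagram(Scene):
    def construct(self):
        # Six memory-cell rectangles in a row, labelled like the CPU block above.
        mem = Rectangle(height=0.5, width=0.5)
        cells = VGroup(*[mem.copy() for _ in range(6)]).arrange(RIGHT, buff=0)
        label = Text("CPU", font_size=24)
        block = Group(cells, label).arrange(DOWN, buff=0.5)
        block.move_to([-2.5, -0.5, 0])
        self.play(Write(label, run_time=1))
        self.add(block)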
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}
class lowerCAmelCase ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = """convnextv2"""
def __init__( self , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=4 , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.02 , lowerCAmelCase__=1e-12 , lowerCAmelCase__=0.0 , lowerCAmelCase__=224 , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ , ) -> int:
super().__init__(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_stages
SCREAMING_SNAKE_CASE = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
SCREAMING_SNAKE_CASE = [3, 3, 9, 3] if depths is None else depths
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = ['stem'] + [F'stage{idx}' for idx in range(1 , len(self.depths ) + 1 )]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase__ , out_indices=lowerCAmelCase__ , stage_names=self.stage_names )
| 247 | 1 |
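A quick usage sketch for the configuration above, assuming it corresponds to the public `ConvNextV2Config` class in `transformers`:

from transformers import ConvNextV2Config

# Defaults mirror the __init__ signature above; stage_names is derived from depths.
config = ConvNextV2Config()
print(config.model_type)    # "convnextv2"
print(config.hidden_sizes)  # [96, 192, 384, 768]
print(config.stage_names)   # ["stem", "stage1", "stage2", "stage3", "stage4"]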
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class lowerCAmelCase_ ( nn.Module ):
SCREAMING_SNAKE_CASE_ : int
SCREAMING_SNAKE_CASE_ : int
SCREAMING_SNAKE_CASE_ : float = 0.0
SCREAMING_SNAKE_CASE_ : int = 1
SCREAMING_SNAKE_CASE_ : int = 1
SCREAMING_SNAKE_CASE_ : bool = True
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : jnp.dtype = jnp.floataa
def a_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Any = []
_UpperCAmelCase : Tuple = []
for i in range(self.num_layers ):
_UpperCAmelCase : Union[str, Any] = self.in_channels if i == 0 else self.out_channels
_UpperCAmelCase : int = FlaxResnetBlockaD(
in_channels=UpperCAmelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase_ )
_UpperCAmelCase : List[str] = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCAmelCase_ )
_UpperCAmelCase : List[Any] = resnets
_UpperCAmelCase : Tuple = attentions
if self.add_downsample:
_UpperCAmelCase : Tuple = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int]=True ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Tuple = ()
for resnet, attn in zip(self.resnets , self.attentions ):
_UpperCAmelCase : List[str] = resnet(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_ )
_UpperCAmelCase : List[str] = attn(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
_UpperCAmelCase : Union[str, Any] = self.downsamplers_a(UpperCAmelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class lowerCAmelCase_ ( nn.Module ):
SCREAMING_SNAKE_CASE_ : int
SCREAMING_SNAKE_CASE_ : int
SCREAMING_SNAKE_CASE_ : float = 0.0
SCREAMING_SNAKE_CASE_ : int = 1
SCREAMING_SNAKE_CASE_ : bool = True
SCREAMING_SNAKE_CASE_ : jnp.dtype = jnp.floataa
def a_ ( self : Any ) -> str:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = []
for i in range(self.num_layers ):
_UpperCAmelCase : Union[str, Any] = self.in_channels if i == 0 else self.out_channels
_UpperCAmelCase : Union[str, Any] = FlaxResnetBlockaD(
in_channels=UpperCAmelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase_ )
_UpperCAmelCase : Dict = resnets
if self.add_downsample:
_UpperCAmelCase : Optional[int] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str=True ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = ()
for resnet in self.resnets:
_UpperCAmelCase : str = resnet(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
_UpperCAmelCase : Dict = self.downsamplers_a(UpperCAmelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class lowerCAmelCase_ ( nn.Module ):
SCREAMING_SNAKE_CASE_ : int
SCREAMING_SNAKE_CASE_ : int
SCREAMING_SNAKE_CASE_ : int
SCREAMING_SNAKE_CASE_ : float = 0.0
SCREAMING_SNAKE_CASE_ : int = 1
SCREAMING_SNAKE_CASE_ : int = 1
SCREAMING_SNAKE_CASE_ : bool = True
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : jnp.dtype = jnp.floataa
def a_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : str = []
_UpperCAmelCase : Union[str, Any] = []
for i in range(self.num_layers ):
_UpperCAmelCase : Tuple = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_UpperCAmelCase : List[str] = self.prev_output_channel if i == 0 else self.out_channels
_UpperCAmelCase : Any = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase_ )
_UpperCAmelCase : Optional[Any] = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCAmelCase_ )
_UpperCAmelCase : int = resnets
_UpperCAmelCase : List[Any] = attentions
if self.add_upsample:
_UpperCAmelCase : Dict = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple=True ) -> Optional[Any]:
'''simple docstring'''
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
_UpperCAmelCase : Tuple = res_hidden_states_tuple[-1]
_UpperCAmelCase : Dict = res_hidden_states_tuple[:-1]
_UpperCAmelCase : Optional[int] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_UpperCAmelCase : Tuple = resnet(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_ )
_UpperCAmelCase : Tuple = attn(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_ )
if self.add_upsample:
_UpperCAmelCase : List[str] = self.upsamplers_a(UpperCAmelCase_ )
return hidden_states
class lowerCAmelCase_ ( nn.Module ):
SCREAMING_SNAKE_CASE_ : int
SCREAMING_SNAKE_CASE_ : int
SCREAMING_SNAKE_CASE_ : int
SCREAMING_SNAKE_CASE_ : float = 0.0
SCREAMING_SNAKE_CASE_ : int = 1
SCREAMING_SNAKE_CASE_ : bool = True
SCREAMING_SNAKE_CASE_ : jnp.dtype = jnp.floataa
def a_ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = []
for i in range(self.num_layers ):
_UpperCAmelCase : Optional[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_UpperCAmelCase : Any = self.prev_output_channel if i == 0 else self.out_channels
_UpperCAmelCase : str = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase_ )
_UpperCAmelCase : int = resnets
if self.add_upsample:
_UpperCAmelCase : int = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any]=True ) -> Optional[Any]:
'''simple docstring'''
for resnet in self.resnets:
# pop res hidden states
_UpperCAmelCase : int = res_hidden_states_tuple[-1]
_UpperCAmelCase : List[str] = res_hidden_states_tuple[:-1]
_UpperCAmelCase : List[str] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_UpperCAmelCase : Optional[int] = resnet(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_ )
if self.add_upsample:
_UpperCAmelCase : Tuple = self.upsamplers_a(UpperCAmelCase_ )
return hidden_states
class lowerCAmelCase_ ( nn.Module ):
SCREAMING_SNAKE_CASE_ : int
SCREAMING_SNAKE_CASE_ : float = 0.0
SCREAMING_SNAKE_CASE_ : int = 1
SCREAMING_SNAKE_CASE_ : int = 1
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : jnp.dtype = jnp.floataa
def a_ ( self : Any ) -> Any:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
_UpperCAmelCase : Union[str, Any] = []
for _ in range(self.num_layers ):
_UpperCAmelCase : Union[str, Any] = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCAmelCase_ )
_UpperCAmelCase : Tuple = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase_ )
_UpperCAmelCase : Optional[int] = resnets
_UpperCAmelCase : Optional[Any] = attentions
def __call__( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]=True ) -> int:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.resnets[0](UpperCAmelCase_ , UpperCAmelCase_ )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
_UpperCAmelCase : Optional[Any] = attn(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_ )
_UpperCAmelCase : Any = resnet(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_ )
return hidden_states
| 416 |
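All of the blocks above follow the same Flax pattern: submodules are built in `setup`, and `__call__` threads the hidden states through them while accumulating a tuple of skip activations. A self-contained miniature of that pattern (plain Dense layers stand in for the resnet/attention blocks; all names are illustrative):

import jax
import jax.numpy as jnp
import flax.linen as nn

class TinyDownBlock(nn.Module):
    out_channels: int
    num_layers: int = 1
    add_downsample: bool = True

    def setup(self):
        self.resnets = [nn.Dense(self.out_channels) for _ in range(self.num_layers)]
        if self.add_downsample:
            self.downsample = nn.Dense(self.out_channels)

    def __call__(self, hidden_states):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = jax.nn.silu(resnet(hidden_states))
            output_states += (hidden_states,)  # collect skip connections
        if self.add_downsample:
            hidden_states = self.downsample(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states

block = TinyDownBlock(out_channels=8, num_layers=2)
params = block.init(jax.random.PRNGKey(0), jnp.ones((1, 4)))
out, skips = block.apply(params, jnp.ones((1, 4)))
print(out.shape, len(skips))  # (1, 8) 3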
def abbreviation(a: str, b: str) -> bool:
    """
    Return True if `a` can be transformed into `b` by capitalizing some of its
    lowercase letters and deleting the remaining lowercase letters.

    >>> abbreviation("daBcd", "ABC")
    True
    >>> abbreviation("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    # dp[i][j] is True when the first i characters of `a` can yield the
    # first j characters of `b`.
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 416 | 1 |
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
lowerCAmelCase : Optional[int] = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
lowerCAmelCase : List[str] = {
'base': AutoModel,
'sequence-classification': AutoModelForSequenceClassification,
'question-answering': AutoModelForQuestionAnswering,
'pretraining': AutoModelForPreTraining,
'token-classification': AutoModelForTokenClassification,
'language-modeling': AutoModelWithLMHead,
'summarization': AutoModelForSeqaSeqLM,
'translation': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
lowerCAmelCase : int = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
lowerCAmelCase : Any = sorted(arg_to_scheduler.keys())
lowerCAmelCase : Optional[int] = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class SCREAMING_SNAKE_CASE__ ( pl.LightningModule):
def __init__( self , A_ , A_=None , A_="base" , A_=None , A_=None , A_=None , **A_ , )-> str:
'''simple docstring'''
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(A_ )
UpperCamelCase = 0
UpperCamelCase = Path(self.hparams.output_dir )
UpperCamelCase = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
UpperCamelCase = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=A_ , **A_ , )
else:
UpperCamelCase = config
UpperCamelCase = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams , A_ , A_ ):
assert hasattr(self.config , A_ ), F'''model config doesn\'t have a `{p}` attribute'''
setattr(self.config , A_ , getattr(self.hparams , A_ ) )
if tokenizer is None:
UpperCamelCase = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=A_ , )
else:
UpperCamelCase = tokenizer
UpperCamelCase = MODEL_MODES[mode]
if model is None:
UpperCamelCase = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=A_ , )
else:
UpperCamelCase = model
def UpperCAmelCase_ ( self , *A_ , **A_ )-> Tuple:
'''simple docstring'''
UpperCamelCase = self.model_type.from_pretrained(*A_ , **A_ )
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = arg_to_scheduler[self.hparams.lr_scheduler]
UpperCamelCase = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
UpperCamelCase = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
UpperCamelCase = self.model
UpperCamelCase = ['bias', 'LayerNorm.weight']
UpperCamelCase = [
{
'params': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ],  # check these named parameters
'weight_decay': self.hparams.weight_decay,
},
{
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
if self.hparams.adafactor:
UpperCamelCase = Adafactor(
A_ , lr=self.hparams.learning_rate , scale_parameter=A_ , relative_step=A_ )
else:
UpperCamelCase = AdamW(
A_ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
UpperCamelCase = optimizer
UpperCamelCase = self.get_lr_scheduler()
return [optimizer], [scheduler]
def UpperCAmelCase_ ( self , A_ , A_ )-> Optional[Any]:
'''simple docstring'''
return self.validation_step(A_ , A_ )
def UpperCAmelCase_ ( self , A_ )-> Union[str, Any]:
'''simple docstring'''
return self.validation_end(A_ )
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
UpperCamelCase = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
UpperCamelCase = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def UpperCAmelCase_ ( self , A_ )-> str:
'''simple docstring'''
if stage == "test":
UpperCamelCase = len(self.test_dataloader().dataset )
else:
UpperCamelCase = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=A_ )
UpperCamelCase = len(self.train_dataloader().dataset )
def UpperCAmelCase_ ( self , A_ , A_ , A_ = False )-> Dict:
'''simple docstring'''
raise NotImplementedError('You must implement this for your task' )
def UpperCAmelCase_ ( self )-> str:
'''simple docstring'''
return self.train_loader
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=A_ )
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=A_ )
def UpperCAmelCase_ ( self , A_ )-> int:
'''simple docstring'''
return os.path.join(
self.hparams.data_dir , 'cached_{}_{}_{}'.format(
A_ , list(filter(A_ , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def UpperCAmelCase_ ( self , A_ )-> None:
'''simple docstring'''
UpperCamelCase = self.output_dir.joinpath('best_tfmr' )
UpperCamelCase = self.step_count
self.model.save_pretrained(A_ )
self.tokenizer.save_pretrained(A_ )
@staticmethod
def UpperCAmelCase_ ( A_ , A_ )-> Tuple:
'''simple docstring'''
parser.add_argument(
'--model_name_or_path' , default=A_ , type=A_ , required=A_ , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--config_name' , default='' , type=A_ , help='Pretrained config name or path if not the same as model_name' )
parser.add_argument(
'--tokenizer_name' , default=A_ , type=A_ , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument(
'--cache_dir' , default=str(Path(A_ ).parent / 'test_run' / 'cache' ) , type=A_ , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
parser.add_argument(
'--encoder_layerdrop' , type=A_ , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--decoder_layerdrop' , type=A_ , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--dropout' , type=A_ , help='Dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--attention_dropout' , type=A_ , help='Attention dropout probability (Optional). Goes into model.config' , )
parser.add_argument('--learning_rate' , default=5e-5 , type=A_ , help='The initial learning rate for Adam.' )
parser.add_argument(
'--lr_scheduler' , default='linear' , choices=A_ , metavar=A_ , type=A_ , help='Learning rate scheduler' , )
parser.add_argument('--weight_decay' , default=0.0 , type=A_ , help='Weight decay if we apply some.' )
parser.add_argument('--adam_epsilon' , default=1e-8 , type=A_ , help='Epsilon for Adam optimizer.' )
parser.add_argument('--warmup_steps' , default=0 , type=A_ , help='Linear warmup over warmup_steps.' )
parser.add_argument('--num_workers' , default=4 , type=A_ , help='kwarg passed to DataLoader' )
parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=A_ )
parser.add_argument('--train_batch_size' , default=32 , type=A_ )
parser.add_argument('--eval_batch_size' , default=32 , type=A_ )
parser.add_argument('--adafactor' , action='store_true' )
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on the master worker with RAY; in newer pytorch-lightning versions, accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # Print every RAG parameter that did not receive a gradient.
    def on_after_backward(self, trainer, pl_module):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer, pl_module):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer, pl_module):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))


def add_generic_args(parser, root_dir) -> None:
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        "--output_dir",
        default=str(Path(__file__).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(__file__).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )


def generic_train(
    model,
    args,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfully executed!")

    return trainer
| 3 |
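The `configure_optimizers` hook in the module above uses the standard transformers trick of exempting biases and LayerNorm weights from weight decay. A self-contained sketch of that grouping (the module and hyperparameter values here are illustrative):

import torch
from torch.optim import AdamW

class Tiny(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.dense = torch.nn.Linear(4, 4)
        self.LayerNorm = torch.nn.LayerNorm(4)  # attribute named to mirror HF modules

model = Tiny()
no_decay = ["bias", "LayerNorm.weight"]
grouped_parameters = [
    {
        "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
        "weight_decay": 0.01,
    },
    {
        "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
        "weight_decay": 0.0,
    },
]
optimizer = AdamW(grouped_parameters, lr=5e-5, eps=1e-8)
print([len(g["params"]) for g in optimizer.param_groups])  # [1, 3]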
def UpperCAmelCase_ ( UpperCAmelCase__ = "The quick brown fox jumps over the lazy dog" , ):
lowercase_ = set()
# Replace all the whitespace in our sentence
lowercase_ = input_str.replace(""" """ , """""" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(UpperCAmelCase__ ) == 2_6
def UpperCAmelCase_ ( UpperCAmelCase__ = "The quick brown fox jumps over the lazy dog" , ):
lowercase_ = [False] * 2_6
for char in input_str:
if char.islower():
lowercase_ = True
elif char.isupper():
lowercase_ = True
return all(UpperCAmelCase__ )
def UpperCAmelCase_ ( UpperCAmelCase__ = "The quick brown fox jumps over the lazy dog" , ):
return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6
def UpperCAmelCase_ ( ):
from timeit import timeit
lowercase_ = """from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"""
print(timeit("""is_pangram()""" , setup=UpperCAmelCase__ ) )
print(timeit("""is_pangram_faster()""" , setup=UpperCAmelCase__ ) )
print(timeit("""is_pangram_fastest()""" , setup=UpperCAmelCase__ ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 412 | 0 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_=False ):
"""simple docstring"""
try:
lowerCAmelCase__ : Optional[Any] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
lowerCAmelCase__ : Optional[int] = default
else:
# KEY is set, convert it to True or False.
try:
lowerCAmelCase__ : Tuple = strtobool(lowerCamelCase_ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'''If set, {key} must be yes or no.''' )
return _value
snake_case = parse_flag_from_env("""RUN_SLOW""", default=False)
snake_case = parse_flag_from_env("""RUN_REMOTE""", default=False)
snake_case = parse_flag_from_env("""RUN_LOCAL""", default=True)
snake_case = parse_flag_from_env("""RUN_PACKAGED""", default=True)
# Compression
snake_case = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""")
snake_case = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""")
snake_case = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""")
# Audio
snake_case = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""),
reason="""test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; """,
)
# Beam
snake_case = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""),
reason="""test requires apache-beam and a compatible dill version""",
)
# Dill-cloudpickle compatibility
snake_case = pytest.mark.skipif(
config.DILL_VERSION <= version.parse("""0.3.2"""),
reason="""test requires dill>0.3.2 for cloudpickle compatibility""",
)
# Windows
snake_case = pytest.mark.skipif(
sys.platform == """win32""",
reason="""test should not be run on Windows""",
)
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
lowerCAmelCase__ : List[str] = unittest.skip("test requires faiss" )(lowerCamelCase_ )
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
try:
import regex # noqa
except ImportError:
lowerCAmelCase__ : Optional[int] = unittest.skip("test requires regex" )(lowerCamelCase_ )
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
lowerCAmelCase__ : List[Any] = unittest.skip("test requires elasticsearch" )(lowerCamelCase_ )
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
lowerCAmelCase__ : List[str] = unittest.skip("test requires sqlalchemy" )(lowerCamelCase_ )
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
if not config.TORCH_AVAILABLE:
lowerCAmelCase__ : List[Any] = unittest.skip("test requires PyTorch" )(lowerCamelCase_ )
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
if not config.TF_AVAILABLE:
lowerCAmelCase__ : Optional[int] = unittest.skip("test requires TensorFlow" )(lowerCamelCase_ )
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
if not config.JAX_AVAILABLE:
lowerCAmelCase__ : Optional[Any] = unittest.skip("test requires JAX" )(lowerCamelCase_ )
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
if not config.PIL_AVAILABLE:
lowerCAmelCase__ : int = unittest.skip("test requires Pillow" )(lowerCamelCase_ )
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("test requires transformers" )(lowerCamelCase_ )
else:
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("test requires tiktoken" )(lowerCamelCase_ )
else:
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("test requires spacy" )(lowerCamelCase_ )
else:
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
def _require_spacy_model(lowerCamelCase_ ):
try:
import spacy # noqa F401
spacy.load(lowerCamelCase_ )
except ImportError:
return unittest.skip("test requires spacy" )(lowerCamelCase_ )
except OSError:
return unittest.skip("test requires spacy model '{}'".format(lowerCamelCase_ ) )(lowerCamelCase_ )
else:
return test_case
return _require_spacy_model
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("test requires pyspark" )(lowerCamelCase_ )
else:
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("test requires joblibspark" )(lowerCamelCase_ )
else:
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
lowerCAmelCase__ : int = unittest.skip("test is slow" )(lowerCamelCase_ )
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
lowerCAmelCase__ : Tuple = unittest.skip("test is local" )(lowerCamelCase_ )
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
lowerCAmelCase__ : List[str] = unittest.skip("test is packaged" )(lowerCamelCase_ )
return test_case
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
lowerCAmelCase__ : Union[str, Any] = unittest.skip("test requires remote" )(lowerCamelCase_ )
return test_case
def UpperCAmelCase_ ( *lowerCamelCase_ ):
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(lowerCamelCase_ ) and name.startswith("test" ):
for decorator in decorators:
lowerCAmelCase__ : Optional[Any] = decorator(lowerCamelCase_ )
setattr(cls , lowerCamelCase_ , lowerCamelCase_ )
return cls
return decorate
class lowerCAmelCase ( UpperCamelCase_ ):
pass
class lowerCAmelCase ( UpperCamelCase_ ):
A_ : List[Any] = 0
A_ : int = 1
A_ : Any = 2
@contextmanager
def UpperCAmelCase_ ( lowerCamelCase_=OfflineSimulationMode.CONNECTION_FAILS , lowerCamelCase_=1e-1_6 ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = requests.Session().request
def timeout_request(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ):
# Change the url to an invalid url so that the connection hangs
lowerCAmelCase__ : Union[str, Any] = "https://10.255.255.1"
if kwargs.get("timeout" ) is None:
raise RequestWouldHangIndefinitelyError(
f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' )
lowerCAmelCase__ : Union[str, Any] = timeout
try:
return online_request(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
lowerCAmelCase__ : Union[str, Any] = url
lowerCAmelCase__ : List[Any] = e.args[0]
lowerCAmelCase__ : Tuple = (max_retry_error.args[0].replace("10.255.255.1" , f'''OfflineMock[{url}]''' ),)
lowerCAmelCase__ : str = (max_retry_error,)
raise
def raise_connection_error(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ):
raise requests.ConnectionError("Offline mode is enabled." , request=lowerCamelCase_ )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("requests.Session.send" , lowerCamelCase_ ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("requests.Session.request" , lowerCamelCase_ ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("datasets.config.HF_DATASETS_OFFLINE" , lowerCamelCase_ ):
yield
else:
raise ValueError("Please use a value from the OfflineSimulationMode enum." )
@contextmanager
def UpperCAmelCase_ ( *lowerCamelCase_ , **lowerCamelCase_ ):
"""simple docstring"""
lowerCAmelCase__ : str = str(Path().resolve() )
with tempfile.TemporaryDirectory(*lowerCamelCase_ , **lowerCamelCase_ ) as tmp_dir:
try:
os.chdir(lowerCamelCase_ )
yield
finally:
os.chdir(lowerCamelCase_ )
@contextmanager
def UpperCAmelCase_ ( ):
"""simple docstring"""
import gc
gc.collect()
lowerCAmelCase__ : List[str] = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def UpperCAmelCase_ ( ):
"""simple docstring"""
import gc
gc.collect()
lowerCAmelCase__ : Dict = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return deepcopy(lowerCamelCase_ ).integers(0 , 1_0_0 , 1_0 ).tolist() == deepcopy(lowerCamelCase_ ).integers(0 , 1_0_0 , 1_0 ).tolist()
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ ):
try:
return func(*lowerCamelCase_ , **lowerCamelCase_ )
except HTTPError as err:
if str(lowerCamelCase_ ).startswith("500" ) or str(lowerCamelCase_ ).startswith("502" ):
pytest.xfail(str(lowerCamelCase_ ) )
raise err
return decorator.decorator(_wrapper , lowerCamelCase_ )
class lowerCAmelCase :
def __init__( self : Optional[Any] , a__ : Any , a__ : Optional[Any] , a__ : int ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = returncode
lowerCAmelCase__ : str = stdout
lowerCAmelCase__ : Tuple = stderr
async def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
while True:
lowerCAmelCase__ : Optional[int] = await stream.readline()
if line:
callback(lowerCamelCase_ )
else:
break
async def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=False , lowerCamelCase_=False ):
"""simple docstring"""
if echo:
print("\nRunning: " , " ".join(lowerCamelCase_ ) )
lowerCAmelCase__ : Tuple = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=lowerCamelCase_ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=lowerCamelCase_ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
lowerCAmelCase__ : List[str] = []
lowerCAmelCase__ : str = []
def tee(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_="" ):
lowerCAmelCase__ : int = line.decode("utf-8" ).rstrip()
sink.append(lowerCamelCase_ )
if not quiet:
print(lowerCamelCase_ , lowerCamelCase_ , file=lowerCamelCase_ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda lowerCamelCase_ : tee(lowerCamelCase_ , lowerCamelCase_ , sys.stdout , label="stdout:" ) ),
_read_stream(p.stderr , lambda lowerCamelCase_ : tee(lowerCamelCase_ , lowerCamelCase_ , sys.stderr , label="stderr:" ) ),
] , timeout=lowerCamelCase_ , )
return _RunOutput(await p.wait() , lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=1_8_0 , lowerCamelCase_=False , lowerCamelCase_=True ):
"""simple docstring"""
lowerCAmelCase__ : Any = asyncio.get_event_loop()
lowerCAmelCase__ : Any = loop.run_until_complete(
_stream_subprocess(lowerCamelCase_ , env=lowerCamelCase_ , stdin=lowerCamelCase_ , timeout=lowerCamelCase_ , quiet=lowerCamelCase_ , echo=lowerCamelCase_ ) )
lowerCAmelCase__ : Union[str, Any] = " ".join(lowerCamelCase_ )
if result.returncode > 0:
lowerCAmelCase__ : List[str] = "\n".join(result.stderr )
raise RuntimeError(
f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
f'''The combined stderr from workers follows:\n{stderr}''' )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f'''\'{cmd_str}\' produced no output.''' )
return result
def UpperCAmelCase_ ( ):
"""simple docstring"""
lowerCAmelCase__ : int = os.environ.get("PYTEST_XDIST_WORKER" , "gw0" )
lowerCAmelCase__ : Optional[Any] = re.sub(R"^gw" , "" , lowerCamelCase_ , 0 , re.M )
return int(lowerCamelCase_ )
def UpperCAmelCase_ ( ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = 2_9_5_0_0
lowerCAmelCase__ : Optional[Any] = pytest_xdist_worker_id()
return port + uniq_delta
| 568 |
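A usage sketch for the offline-simulation context manager defined above; `offline` and `OfflineSimulationMode` are the de-obfuscated names these helpers carry in the `datasets` test utilities, so treat them as assumptions here:

import requests

# With CONNECTION_FAILS, requests.Session.send is patched to raise
# ConnectionError, so any HTTP call inside the block fails fast.
with offline(OfflineSimulationMode.CONNECTION_FAILS):  # names assumed, see above
    try:
        requests.get("https://huggingface.co")
    except requests.ConnectionError:
        print("offline mode correctly blocked the request")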
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
snake_case = None
snake_case = logging.get_logger(__name__)
snake_case = {"""vocab_file""": """sentencepiece.model""", """tokenizer_file""": """tokenizer.json"""}
snake_case = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
"""tokenizer_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""",
},
}
snake_case = {
"""google/rembert""": 2_56,
}
snake_case = """▁"""
class lowerCAmelCase ( UpperCamelCase_ ):
A_ : Dict = VOCAB_FILES_NAMES
A_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
A_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : Tuple = RemBertTokenizer
def __init__( self : Dict , a__ : int=None , a__ : List[Any]=None , a__ : List[Any]=True , a__ : Dict=True , a__ : int=False , a__ : Tuple="[CLS]" , a__ : Optional[int]="[SEP]" , a__ : Optional[Any]="<unk>" , a__ : List[str]="[SEP]" , a__ : Any="<pad>" , a__ : List[str]="[CLS]" , a__ : int="[MASK]" , **a__ : Dict , ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else mask_token
super().__init__(
a__ , tokenizer_file=a__ , do_lower_case=a__ , remove_space=a__ , keep_accents=a__ , bos_token=a__ , eos_token=a__ , unk_token=a__ , sep_token=a__ , pad_token=a__ , cls_token=a__ , mask_token=a__ , **a__ , )
lowerCAmelCase__ : Dict = do_lower_case
lowerCAmelCase__ : List[str] = remove_space
lowerCAmelCase__ : Optional[int] = keep_accents
lowerCAmelCase__ : Tuple = vocab_file
lowerCAmelCase__ : Optional[int] = False if not self.vocab_file else True
def _A ( self : List[str] , a__ : List[int] , a__ : Optional[List[int]] = None ):
'''simple docstring'''
lowerCAmelCase__ : Any = [self.sep_token_id]
lowerCAmelCase__ : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _A ( self : List[str] , a__ : List[int] , a__ : Optional[List[int]] = None , a__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(a__ )) + [1] + ([0] * len(a__ )) + [1]
return [1] + ([0] * len(a__ )) + [1]
def _A ( self : Tuple , a__ : List[int] , a__ : Optional[List[int]] = None ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = [self.sep_token_id]
lowerCAmelCase__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _A ( self : List[str] , a__ : str , a__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(a__ ):
logger.error("Vocabulary path ({}) should be a directory".format(a__ ) )
return
lowerCAmelCase__ : List[Any] = os.path.join(
a__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ):
copyfile(self.vocab_file , a__ )
return (out_vocab_file,)
| 568 | 1 |
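The three mask/ids methods above implement the standard BERT-style special-token layout. A small arithmetic sketch with hypothetical token ids:

# Hypothetical ids for illustration only.
cls_id, sep_id = 101, 102
seq_a, seq_b = [7, 8], [9]

single = [cls_id] + seq_a + [sep_id]                   # [CLS] A [SEP]
pair = [cls_id] + seq_a + [sep_id] + seq_b + [sep_id]  # [CLS] A [SEP] B [SEP]
token_type_ids = [0] * (len(seq_a) + 2) + [1] * (len(seq_b) + 1)
print(pair)            # [101, 7, 8, 102, 9, 102]
print(token_type_ids)  # [0, 0, 0, 0, 1, 1]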
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Tuple = logging.get_logger(__name__)
a : Union[str, Any] = {
"""facebook/levit-128S""": """https://huggingface.co/facebook/levit-128S/resolve/main/config.json""",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "levit"
def __init__( self , snake_case__=224 , snake_case__=3 , snake_case__=3 , snake_case__=2 , snake_case__=1 , snake_case__=16 , snake_case__=[128, 256, 384] , snake_case__=[4, 8, 12] , snake_case__=[4, 4, 4] , snake_case__=[16, 16, 16] , snake_case__=0 , snake_case__=[2, 2, 2] , snake_case__=[2, 2, 2] , snake_case__=0.02 , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
lowercase__ : Optional[Any]= image_size
lowercase__ : Dict= num_channels
lowercase__ : Optional[int]= kernel_size
lowercase__ : Dict= stride
lowercase__ : Optional[Any]= padding
lowercase__ : int= hidden_sizes
lowercase__ : str= num_attention_heads
lowercase__ : List[str]= depths
lowercase__ : int= key_dim
lowercase__ : Optional[int]= drop_path_rate
lowercase__ : Optional[Any]= patch_size
lowercase__ : Any= attention_ratio
lowercase__ : str= mlp_ratio
lowercase__ : Tuple= initializer_range
lowercase__ : Optional[int]= [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = version.parse("1.11" )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return 1e-4
| 218 |
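Usage sketch for the configuration above, assuming it corresponds to the public `LevitConfig` class in `transformers`:

from transformers import LevitConfig

config = LevitConfig()
print(config.model_type)    # "levit"
print(config.hidden_sizes)  # [128, 256, 384]
print(config.depths)        # [4, 4, 4]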
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = "WhisperFeatureExtractor"
__lowerCamelCase = "WhisperTokenizer"
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ )
lowercase__ : List[Any]= self.feature_extractor
lowercase__ : Dict= False
def UpperCAmelCase_ ( self , snake_case__=None , snake_case__=None , snake_case__=True ):
'''simple docstring'''
return self.tokenizer.get_decoder_prompt_ids(task=snake_case__ , language=snake_case__ , no_timestamps=snake_case__ )
def __call__( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*snake_case__ , **snake_case__ )
lowercase__ : Tuple= kwargs.pop("audio" , snake_case__ )
lowercase__ : Optional[int]= kwargs.pop("sampling_rate" , snake_case__ )
lowercase__ : Union[str, Any]= kwargs.pop("text" , snake_case__ )
if len(snake_case__ ) > 0:
lowercase__ : List[Any]= args[0]
lowercase__ : str= args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
lowercase__ : Tuple= self.feature_extractor(snake_case__ , *snake_case__ , sampling_rate=snake_case__ , **snake_case__ )
if text is not None:
lowercase__ : Optional[Any]= self.tokenizer(snake_case__ , **snake_case__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowercase__ : str= encodings["input_ids"]
return inputs
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__="np" ):
'''simple docstring'''
return self.tokenizer.get_prompt_ids(snake_case__ , return_tensors=snake_case__ )
| 218 | 1 |
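A usage sketch of the `__call__` contract implemented above: audio goes to the feature extractor, text to the tokenizer, and passing both attaches the token ids as labels (the checkpoint name is the public openai/whisper-tiny):

import numpy as np
from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
inputs = processor(audio=audio, sampling_rate=16_000, text="hello world")
print(sorted(inputs.keys()))  # ['input_features', 'labels']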
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
assert (
isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and number_of_steps > 0
), F'number_of_steps needs to be positive integer, your input {number_of_steps}'
if number_of_steps == 1:
return 1
UpperCamelCase__ , UpperCamelCase__ = 1, 1
for _ in range(number_of_steps - 1 ):
UpperCamelCase__ , UpperCamelCase__ = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : str= logging.get_logger(__name__)
A__ : List[Any]= {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __lowerCamelCase ( _a ):
a : Any ="""segformer"""
def __init__( self , snake_case_=3 , snake_case_=4 , snake_case_=[2, 2, 2, 2] , snake_case_=[8, 4, 2, 1] , snake_case_=[32, 64, 160, 256] , snake_case_=[7, 3, 3, 3] , snake_case_=[4, 2, 2, 2] , snake_case_=[1, 2, 5, 8] , snake_case_=[4, 4, 4, 4] , snake_case_="gelu" , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=0.02 , snake_case_=0.1 , snake_case_=1E-6 , snake_case_=256 , snake_case_=255 , **snake_case_ , ) -> Tuple:
super().__init__(**snake_case_ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
' removed, as the behaviour will default to that of reshape_last_stage = True.' , snake_case_ , )
UpperCamelCase__ = num_channels
UpperCamelCase__ = num_encoder_blocks
UpperCamelCase__ = depths
UpperCamelCase__ = sr_ratios
UpperCamelCase__ = hidden_sizes
UpperCamelCase__ = patch_sizes
UpperCamelCase__ = strides
UpperCamelCase__ = mlp_ratios
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = classifier_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = drop_path_rate
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = decoder_hidden_size
UpperCamelCase__ = kwargs.get('reshape_last_stage' , snake_case_ )
UpperCamelCase__ = semantic_loss_ignore_index
class __lowerCamelCase ( _a ):
a : Any =version.parse("""1.11""" )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> float:
return 1E-4
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return 12
| 20 | 0 |
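Usage sketch for the configuration above, assuming it corresponds to the public `SegformerConfig` class in `transformers` (the defaults match the MiT-b0 encoder layout):

from transformers import SegformerConfig

config = SegformerConfig()
print(config.depths)               # [2, 2, 2, 2]
print(config.hidden_sizes)         # [32, 64, 160, 256]
print(config.decoder_hidden_size)  # 256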
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __magic_name__ ( unittest.TestCase ):
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = tempfile.mkdtemp()
_lowerCAmelCase = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        _lowerCAmelCase = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
        }
_lowerCAmelCase = os.path.join(self.tmpdirname , __magic_name__ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(__magic_name__ , __magic_name__ )
def _lowerCamelCase ( self , **__magic_name__ ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ )
def _lowerCamelCase ( self , **__magic_name__ ):
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__magic_name__ )
def _lowerCamelCase ( self , **__magic_name__ ):
"""simple docstring"""
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _lowerCamelCase ( self ):
"""simple docstring"""
        _lowerCAmelCase = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
_lowerCAmelCase = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = self.get_rust_tokenizer()
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
processor_slow.save_pretrained(self.tmpdirname )
_lowerCAmelCase = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__magic_name__ )
_lowerCAmelCase = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
processor_fast.save_pretrained(self.tmpdirname )
_lowerCAmelCase = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __magic_name__ )
self.assertIsInstance(processor_fast.tokenizer , __magic_name__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __magic_name__ )
self.assertIsInstance(processor_fast.image_processor , __magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_lowerCAmelCase = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 )
_lowerCAmelCase = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__magic_name__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __magic_name__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
_lowerCAmelCase = self.prepare_image_inputs()
_lowerCAmelCase = image_processor(__magic_name__ , return_tensors='np' )
_lowerCAmelCase = processor(images=__magic_name__ , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
_lowerCAmelCase = 'lower newer'
_lowerCAmelCase = processor(text=__magic_name__ )
_lowerCAmelCase = tokenizer(__magic_name__ , padding='max_length' , max_length=6_4 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
_lowerCAmelCase = 'lower newer'
_lowerCAmelCase = self.prepare_image_inputs()
_lowerCAmelCase = processor(text=__magic_name__ , images=__magic_name__ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(__magic_name__ ):
processor()
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
_lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase = processor.batch_decode(__magic_name__ )
_lowerCAmelCase = tokenizer.batch_decode(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
_lowerCAmelCase = 'lower newer'
_lowerCAmelCase = self.prepare_image_inputs()
_lowerCAmelCase = processor(text=__magic_name__ , images=__magic_name__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 589 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
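
# Note (added for clarity, not part of the original script): summing every
# parameter gives a cheap numeric fingerprint of a state dict; comparing it
# before and after conversion (see the assert in convert_flava_checkpoint
# below) catches dropped or duplicated tensors without a key-by-key diff.
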
def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 589 | 1 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
a__ : List[Any] = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    r"""Constructs a LeViT image processor."""

    model_input_names = ["pixel_values"]
def __init__( self : str , lowerCAmelCase : bool = True , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase : bool = True , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : bool = True , lowerCAmelCase : Union[int, float] = 1 / 2_55 , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , lowerCAmelCase : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **lowerCAmelCase : Optional[Any] , ) -> None:
"""simple docstring"""
super().__init__(**__UpperCamelCase)
lowercase__ = size if size is not None else {'shortest_edge': 2_24}
lowercase__ = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase)
lowercase__ = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
lowercase__ = get_size_dict(__UpperCamelCase , param_name='crop_size')
lowercase__ = do_resize
lowercase__ = size
lowercase__ = resample
lowercase__ = do_center_crop
lowercase__ = crop_size
lowercase__ = do_rescale
lowercase__ = rescale_factor
lowercase__ = do_normalize
lowercase__ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowercase__ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCAmelCase ( self : Any , lowerCAmelCase : np.ndarray , lowerCAmelCase : Dict[str, int] , lowerCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : str , ) -> np.ndarray:
"""simple docstring"""
lowercase__ = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase)
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
lowercase__ = int((2_56 / 2_24) * size['shortest_edge'])
lowercase__ = get_resize_output_image_size(__UpperCamelCase , size=__UpperCamelCase , default_to_square=__UpperCamelCase)
lowercase__ = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''')
return resize(
__UpperCamelCase , size=(size_dict['height'], size_dict['width']) , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase)
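    # Note (added for clarity, not part of the original file): resizing the
    # shortest edge to (256 / 224) * size and then center-cropping back to
    # `size` reproduces the classic ImageNet evaluation preprocessing
    # (resize to 256, crop to 224), generalized to arbitrary crop sizes.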
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : np.ndarray , lowerCAmelCase : Dict[str, int] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : List[Any] , ) -> np.ndarray:
"""simple docstring"""
lowercase__ = get_size_dict(__UpperCamelCase)
if "height" not in size or "width" not in size:
raise ValueError(f'''Size dict must have keys \'height\' and \'width\'. Got {size.keys()}''')
return center_crop(__UpperCamelCase , size=(size['height'], size['width']) , data_format=__UpperCamelCase , **__UpperCamelCase)
def UpperCAmelCase ( self : Any , lowerCAmelCase : np.ndarray , lowerCAmelCase : Union[int, float] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : int , ) -> np.ndarray:
"""simple docstring"""
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase)
def UpperCAmelCase ( self : Any , lowerCAmelCase : np.ndarray , lowerCAmelCase : Union[float, List[float]] , lowerCAmelCase : Union[float, List[float]] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : Optional[Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : ImageInput , lowerCAmelCase : Optional[bool] = None , lowerCAmelCase : Optional[Dict[str, int]] = None , lowerCAmelCase : PILImageResampling = None , lowerCAmelCase : Optional[bool] = None , lowerCAmelCase : Optional[Dict[str, int]] = None , lowerCAmelCase : Optional[bool] = None , lowerCAmelCase : Optional[float] = None , lowerCAmelCase : Optional[bool] = None , lowerCAmelCase : Optional[Union[float, Iterable[float]]] = None , lowerCAmelCase : Optional[Union[float, Iterable[float]]] = None , lowerCAmelCase : Optional[TensorType] = None , lowerCAmelCase : ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase : Dict , ) -> BatchFeature:
"""simple docstring"""
lowercase__ = do_resize if do_resize is not None else self.do_resize
lowercase__ = resample if resample is not None else self.resample
lowercase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase__ = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ = image_mean if image_mean is not None else self.image_mean
lowercase__ = image_std if image_std is not None else self.image_std
lowercase__ = size if size is not None else self.size
lowercase__ = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase)
lowercase__ = crop_size if crop_size is not None else self.crop_size
lowercase__ = get_size_dict(__UpperCamelCase , param_name='crop_size')
lowercase__ = make_list_of_images(__UpperCamelCase)
if not valid_images(__UpperCamelCase):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
lowercase__ = [to_numpy_array(__UpperCamelCase) for image in images]
if do_resize:
lowercase__ = [self.resize(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase) for image in images]
if do_center_crop:
lowercase__ = [self.center_crop(__UpperCamelCase , __UpperCamelCase) for image in images]
if do_rescale:
lowercase__ = [self.rescale(__UpperCamelCase , __UpperCamelCase) for image in images]
if do_normalize:
lowercase__ = [self.normalize(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase) for image in images]
lowercase__ = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase) for image in images]
lowercase__ = {'pixel_values': images}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase)
| 707 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 642 | 0 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(image: Image) -> Dict:
    npimg = np.array(image)
    shape = npimg.shape
    return {"hash": hashimage(image), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else []))
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []))
def lowerCamelCase_ ( self : int,__A : Any,__A : Optional[int],__A : Optional[int] ):
_lowerCamelCase : Tuple = MaskGenerationPipeline(model=__A,image_processor=__A )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase_ ( self : Tuple,__A : List[Any],__A : Union[str, Any] ):
pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
def lowerCamelCase_ ( self : str ):
pass
@slow
@require_torch
def lowerCamelCase_ ( self : int ):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{"mask": {"hash": "115ad19f5f", "shape": (4_8_0, 6_4_0)}, "scores": 1.0444},
{"mask": {"hash": "6affa964c6", "shape": (4_8_0, 6_4_0)}, "scores": 1.021},
{"mask": {"hash": "dfe28a0388", "shape": (4_8_0, 6_4_0)}, "scores": 1.0167},
{"mask": {"hash": "c0a5f4a318", "shape": (4_8_0, 6_4_0)}, "scores": 1.0132},
{"mask": {"hash": "fe8065c197", "shape": (4_8_0, 6_4_0)}, "scores": 1.0053},
{"mask": {"hash": "e2d0b7a0b7", "shape": (4_8_0, 6_4_0)}, "scores": 0.9967},
{"mask": {"hash": "453c7844bd", "shape": (4_8_0, 6_4_0)}, "scores": 0.993},
{"mask": {"hash": "3d44f2926d", "shape": (4_8_0, 6_4_0)}, "scores": 0.9909},
{"mask": {"hash": "64033ddc3f", "shape": (4_8_0, 6_4_0)}, "scores": 0.9879},
{"mask": {"hash": "801064ff79", "shape": (4_8_0, 6_4_0)}, "scores": 0.9834},
{"mask": {"hash": "6172f276ef", "shape": (4_8_0, 6_4_0)}, "scores": 0.9716},
{"mask": {"hash": "b49e60e084", "shape": (4_8_0, 6_4_0)}, "scores": 0.9612},
{"mask": {"hash": "a811e775fd", "shape": (4_8_0, 6_4_0)}, "scores": 0.9599},
{"mask": {"hash": "a6a8ebcf4b", "shape": (4_8_0, 6_4_0)}, "scores": 0.9552},
{"mask": {"hash": "9d8257e080", "shape": (4_8_0, 6_4_0)}, "scores": 0.9532},
{"mask": {"hash": "32de6454a8", "shape": (4_8_0, 6_4_0)}, "scores": 0.9516},
{"mask": {"hash": "af3d4af2c8", "shape": (4_8_0, 6_4_0)}, "scores": 0.9499},
{"mask": {"hash": "3c6db475fb", "shape": (4_8_0, 6_4_0)}, "scores": 0.9483},
{"mask": {"hash": "c290813fb9", "shape": (4_8_0, 6_4_0)}, "scores": 0.9464},
{"mask": {"hash": "b6f0b8f606", "shape": (4_8_0, 6_4_0)}, "scores": 0.943},
{"mask": {"hash": "92ce16bfdf", "shape": (4_8_0, 6_4_0)}, "scores": 0.943},
{"mask": {"hash": "c749b25868", "shape": (4_8_0, 6_4_0)}, "scores": 0.9408},
{"mask": {"hash": "efb6cab859", "shape": (4_8_0, 6_4_0)}, "scores": 0.9335},
{"mask": {"hash": "1ff2eafb30", "shape": (4_8_0, 6_4_0)}, "scores": 0.9326},
{"mask": {"hash": "788b798e24", "shape": (4_8_0, 6_4_0)}, "scores": 0.9262},
{"mask": {"hash": "abea804f0e", "shape": (4_8_0, 6_4_0)}, "scores": 0.8999},
{"mask": {"hash": "7b9e8ddb73", "shape": (4_8_0, 6_4_0)}, "scores": 0.8986},
{"mask": {"hash": "cd24047c8a", "shape": (4_8_0, 6_4_0)}, "scores": 0.8984},
{"mask": {"hash": "6943e6bcbd", "shape": (4_8_0, 6_4_0)}, "scores": 0.8873},
{"mask": {"hash": "b5f47c9191", "shape": (4_8_0, 6_4_0)}, "scores": 0.8871}
],)
# fmt: on
@require_torch
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Union[str, Any] = "facebook/sam-vit-huge"
_lowerCamelCase : Tuple = pipeline("mask-generation",model=__A )
_lowerCamelCase : Union[str, Any] = image_segmenter(
"http://images.cocodataset.org/val2017/000000039769.jpg",pred_iou_thresh=1,points_per_batch=2_5_6 )
# Shortening by hashing
_lowerCamelCase : List[Any] = []
for i, o in enumerate(outputs["masks"] ):
new_outupt += [{"mask": mask_to_test_readable(__A ), "scores": outputs["scores"][i]}]
self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{"mask": {"hash": "115ad19f5f", "shape": (4_8_0, 6_4_0)}, "scores": 1.0444},
{"mask": {"hash": "6affa964c6", "shape": (4_8_0, 6_4_0)}, "scores": 1.0210},
{"mask": {"hash": "dfe28a0388", "shape": (4_8_0, 6_4_0)}, "scores": 1.0167},
{"mask": {"hash": "c0a5f4a318", "shape": (4_8_0, 6_4_0)}, "scores": 1.0132},
{"mask": {"hash": "fe8065c197", "shape": (4_8_0, 6_4_0)}, "scores": 1.0053},
],)
| 44 |
import math


def fx(x: float, a: float) -> float:
    # f(x) = x^2 - a; its positive root is sqrt(a).
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    # f'(x) = 2x
    return 2 * x


def get_initial_point(a: float) -> float:
    # Repeated squaring quickly produces a value >= sqrt(a), giving a safe
    # starting point for the Newton iteration.
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001) -> float:
    """Square root approximated with the Newton-Raphson method."""
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
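
# Usage sketch (illustrative, assumes the functions above):
#   >>> round(square_root_iterative(2), 8)
#   1.41421356
# Each Newton step maps x to x - (x**2 - a) / (2 * x) = (x + a / x) / 2,
# i.e. the classical Babylonian averaging rule for square roots.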
| 562 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_xlm_roberta_xl""": [
"""XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaXLConfig""",
"""XLMRobertaXLOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
"""XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaXLForCausalLM""",
"""XLMRobertaXLForMaskedLM""",
"""XLMRobertaXLForMultipleChoice""",
"""XLMRobertaXLForQuestionAnswering""",
"""XLMRobertaXLForSequenceClassification""",
"""XLMRobertaXLForTokenClassification""",
"""XLMRobertaXLModel""",
"""XLMRobertaXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 701 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""vinai/phobert-base""": 256,
"""vinai/phobert-large""": 256,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
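
# Quick illustration (not in the original file): for the symbol tuple
# ("l", "o", "w", "</w>") the candidate BPE merge pairs are
# {("l", "o"), ("o", "w"), ("w", "</w>")}.
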
class PhobertTokenizer(PreTrainedTokenizer):
    """Construct a PhoBERT tokenizer, using Byte-Pair-Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_="<s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="<s>" , UpperCAmelCase_="<unk>" , UpperCAmelCase_="<pad>" , UpperCAmelCase_="<mask>" , **UpperCAmelCase_ , ):
super().__init__(
bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , **UpperCAmelCase_ , )
lowerCAmelCase = vocab_file
lowerCAmelCase = merges_file
lowerCAmelCase = {}
lowerCAmelCase = 0
lowerCAmelCase = 1
lowerCAmelCase = 2
lowerCAmelCase = 3
self.add_from_file(UpperCAmelCase_ )
lowerCAmelCase = {v: k for k, v in self.encoder.items()}
with open(UpperCAmelCase_ , encoding='''utf-8''' ) as merges_handle:
lowerCAmelCase = merges_handle.read().split('''\n''' )[:-1]
lowerCAmelCase = [tuple(merge.split()[:-1] ) for merge in merges]
lowerCAmelCase = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
lowerCAmelCase = {}
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
lowerCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase_ )) + [1]
return [1] + ([0] * len(UpperCAmelCase_ )) + [1, 1] + ([0] * len(UpperCAmelCase_ )) + [1]
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __snake_case ( self ):
return len(self.encoder )
def __snake_case ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def __snake_case ( self , UpperCAmelCase_ ):
if token in self.cache:
return self.cache[token]
lowerCAmelCase = tuple(UpperCAmelCase_ )
lowerCAmelCase = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
lowerCAmelCase = get_pairs(UpperCAmelCase_ )
if not pairs:
return token
while True:
lowerCAmelCase = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : self.bpe_ranks.get(UpperCAmelCase_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase , lowerCAmelCase = bigram
lowerCAmelCase = []
lowerCAmelCase = 0
while i < len(UpperCAmelCase_ ):
try:
lowerCAmelCase = word.index(UpperCAmelCase_ , UpperCAmelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase = j
if word[i] == first and i < len(UpperCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase = tuple(UpperCAmelCase_ )
lowerCAmelCase = new_word
if len(UpperCAmelCase_ ) == 1:
break
else:
lowerCAmelCase = get_pairs(UpperCAmelCase_ )
lowerCAmelCase = '''@@ '''.join(UpperCAmelCase_ )
lowerCAmelCase = word[:-4]
lowerCAmelCase = word
return word
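
    # Worked example (illustrative): if the merge table ranks ("l", "o") before
    # ("lo", "w</w>"), then bpe("low") rewrites ("l", "o", "w</w>") ->
    # ("lo", "w</w>") -> ("low</w>",) and the "</w>" handling above returns
    # "low"; partially merged words instead keep "@@ " continuation markers.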
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase = []
lowerCAmelCase = re.findall(r'''\S+\n?''' , UpperCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(UpperCAmelCase_ ).split(''' ''' ) ) )
return split_tokens
def __snake_case ( self , UpperCAmelCase_ ):
return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token ) )
def __snake_case ( self , UpperCAmelCase_ ):
return self.decoder.get(UpperCAmelCase_ , self.unk_token )
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase = ''' '''.join(UpperCAmelCase_ ).replace('''@@ ''' , '''''' ).strip()
return out_string
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase = os.path.join(
UpperCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase = os.path.join(
UpperCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ):
copyfile(self.vocab_file , UpperCAmelCase_ )
if os.path.abspath(self.merges_file ) != os.path.abspath(UpperCAmelCase_ ):
copyfile(self.merges_file , UpperCAmelCase_ )
return out_vocab_file, out_merge_file
def __snake_case ( self , UpperCAmelCase_ ):
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
try:
with open(UpperCAmelCase_ , '''r''' , encoding='''utf-8''' ) as fd:
self.add_from_file(UpperCAmelCase_ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(F"""Incorrect encoding detected in {f}, please rebuild the dataset""" )
return
lowerCAmelCase = f.readlines()
for lineTmp in lines:
lowerCAmelCase = lineTmp.strip()
lowerCAmelCase = line.rfind(''' ''' )
if idx == -1:
raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' )
lowerCAmelCase = line[:idx]
lowerCAmelCase = len(self.encoder )
| 33 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
SCREAMING_SNAKE_CASE__ : Optional[int] = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    # Round each spatial dimension up to a multiple of scale_factor**2, then
    # return the matching pixel size (latent grid multiplied by scale_factor).
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
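
# Quick numeric check (illustrative, not part of the original file): with the
# default scale_factor of 8, sizes are rounded up to a multiple of 8**2 = 64:
#   downscale_height_and_width(512, 512)  ->  (64, 64)
#   downscale_height_and_width(520, 512)  ->  (72, 64)   # 520 is padded up to 576
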
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> Tuple:
super().__init__()
self.register_modules(
unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , movq=UpperCamelCase__ , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
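        # Note (added for clarity, an assumption about the usual MoVQ config):
        # with four down blocks this evaluates to 2 ** 3 = 8, matching the
        # default scale_factor in downscale_height_and_width above.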
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
if latents is None:
lowerCamelCase : str = randn_tensor(UpperCamelCase__ , generator=UpperCamelCase__ , device=UpperCamelCase__ , dtype=UpperCamelCase__ )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
lowerCamelCase : Optional[int] = latents.to(UpperCamelCase__ )
lowerCamelCase : Optional[int] = latents * scheduler.init_noise_sigma
return latents
def _lowercase ( self , UpperCamelCase__=0 ) -> List[str]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowerCamelCase : List[str] = torch.device(F'''cuda:{gpu_id}''' )
lowerCamelCase : int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__=0 ) -> int:
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowerCamelCase : Any = torch.device(F'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=UpperCamelCase__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowerCamelCase : Optional[Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowerCamelCase : Optional[int] = cpu_offload_with_hook(UpperCamelCase__ , UpperCamelCase__ , prev_module_hook=UpperCamelCase__ )
# We'll offload the last model manually.
lowerCamelCase : int = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowercase ( self ) -> int:
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase__ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(UpperCamelCase__ )
def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 512 , UpperCamelCase__ = 512 , UpperCamelCase__ = 100 , UpperCamelCase__ = 4.0 , UpperCamelCase__ = 1 , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = "pil" , UpperCamelCase__ = True , ) -> str:
lowerCamelCase : Optional[int] = self._execution_device
lowerCamelCase : Optional[Any] = guidance_scale > 1.0
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase : Any = torch.cat(UpperCamelCase__ , dim=0 )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase : Optional[int] = torch.cat(UpperCamelCase__ , dim=0 )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase : str = torch.cat(UpperCamelCase__ , dim=0 )
lowerCamelCase : int = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
lowerCamelCase : Tuple = image_embeds.repeat_interleave(UpperCamelCase__ , dim=0 )
lowerCamelCase : Union[str, Any] = negative_image_embeds.repeat_interleave(UpperCamelCase__ , dim=0 )
lowerCamelCase : Union[str, Any] = hint.repeat_interleave(UpperCamelCase__ , dim=0 )
lowerCamelCase : str = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase__ )
lowerCamelCase : Dict = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase__ )
self.scheduler.set_timesteps(UpperCamelCase__ , device=UpperCamelCase__ )
lowerCamelCase : Tuple = self.scheduler.timesteps
lowerCamelCase : Dict = self.movq.config.latent_channels
lowerCamelCase : Dict = downscale_height_and_width(UpperCamelCase__ , UpperCamelCase__ , self.movq_scale_factor )
# create initial latent
lowerCamelCase : Dict = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(UpperCamelCase__ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase : Tuple = {'image_embeds': image_embeds, 'hint': hint}
lowerCamelCase : Dict = self.unet(
sample=UpperCamelCase__ , timestep=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , added_cond_kwargs=UpperCamelCase__ , return_dict=UpperCamelCase__ , )[0]
if do_classifier_free_guidance:
lowerCamelCase : Any = noise_pred.split(latents.shape[1] , dim=1 )
lowerCamelCase : int = noise_pred.chunk(2 )
lowerCamelCase : Optional[Any] = variance_pred.chunk(2 )
lowerCamelCase : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowerCamelCase : Dict = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowerCamelCase : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase : List[str] = self.scheduler.step(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ , )[0]
# post-processing
lowerCamelCase : List[Any] = self.movq.decode(UpperCamelCase__ , force_not_quantize=UpperCamelCase__ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
lowerCamelCase : Union[str, Any] = image * 0.5 + 0.5
lowerCamelCase : str = image.clamp(0 , 1 )
lowerCamelCase : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCamelCase : Dict = self.numpy_to_pil(UpperCamelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase__ )
| 311 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
a_ = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
a_ = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
a_ = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
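
    # Reference note (illustrative, standard BLEU definitions): the reported
    # "brevity_penalty" is 1 when the translation is longer than the reference
    # and exp(1 - reference_length / translation_length) otherwise, so a
    # "length_ratio" below 1 scales "bleu" down from the n-gram precisions.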
| 417 | 0 |
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name, arrival_time, burst_time, no_of_process):
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]

            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time
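
# Illustrative check of the selection rule above (not in the original file):
# HRRN runs the ready process with the highest response ratio (W + S) / S,
# where W is the time waited so far and S is the burst (service) time.
#   W=0, S=4  ->  ratio 1.0   (a job that has not waited yet)
#   W=6, S=2  ->  ratio 4.0   (short jobs gain priority quickly while waiting)
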
def calculate_waiting_time(process_name, turn_around_time, burst_time, no_of_process):
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
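
# Numeric sanity check (illustrative): a process arriving at t=2 with burst 3
# that completes at t=9 has turn-around time 9 - 2 = 7, so its waiting time is
# 7 - 3 = 4 -- exactly the subtraction performed above.
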
if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
print("""Process name \tArrival time \tBurst time \tTurn around time \tWaiting time""")
for i in range(0, no_of_process):
print(
F"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
F"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
)
print(F"average waiting time : {mean(waiting_time):.5f}")
print(F"average turn around time : {mean(turn_around_time):.5f}")
| 718 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of the REALM models."""

    model_type = "realm"

    def __init__(
        self, vocab_size=30522, hidden_size=768, retriever_proj_size=128, num_hidden_layers=12,
        num_attention_heads=12, num_candidates=8, intermediate_size=3072, hidden_act="gelu_new",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, span_hidden_size=256,
        max_span_width=10, reader_layer_norm_eps=1e-3, reader_beam_size=5, reader_seq_len=320,
        num_block_records=13353718, searcher_beam_size=5000, pad_token_id=1, bos_token_id=0,
        eos_token_id=2, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 492 | 0 |
class TrieNode:
    """A node of a prefix tree (trie) storing lowercase words."""

    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: "TrieNode", word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
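
# Minimal usage sketch (illustrative; mirrors the assertions in test_trie below):
#   root = TrieNode()
#   root.insert_many(["band", "bandana"])
#   root.find("band"), root.find("bandana")   # True, True
#   root.delete("band")
#   root.find("band"), root.find("bandana")   # False, True (shared prefix kept)
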
def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")

    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
main()
| 261 |
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def _A ( snake_case__ : List[str]="ro" , snake_case__ : int="en" , snake_case__ : Any="wmt16" , snake_case__ : Optional[Any]=None ):
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
snake_case__ : List[Any] = f'''{src_lang}-{tgt_lang}'''
print(f'''Converting {dataset}-{pair}''' )
snake_case__ : Optional[Any] = datasets.load_dataset(snake_case__ , snake_case__ )
if save_dir is None:
snake_case__ : Optional[int] = f'''{dataset}-{pair}'''
snake_case__ : Optional[int] = Path(snake_case__ )
save_dir.mkdir(exist_ok=snake_case__ )
for split in ds.keys():
print(f'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
snake_case__ : Optional[int] = '''val''' if split == '''validation''' else split
snake_case__ : Optional[Any] = save_dir.joinpath(f'''{fn}.source''' )
snake_case__ : Any = save_dir.joinpath(f'''{fn}.target''' )
snake_case__ : Union[str, Any] = src_path.open('''w+''' )
snake_case__ : str = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
snake_case__ : int = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 261 | 1 |
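# Illustrative programmatic use of download_wmt_dataset above (assumes network
# access and the `datasets` package; the fire CLI exposes the same arguments
# as command-line flags):
download_wmt_dataset("ro", "en", dataset="wmt16", save_dir="wmt16-ro-en")
# -> writes line-aligned files train.source/train.target, val.source/val.target
#    and test.source/test.target under wmt16-ro-en/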
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate the entropy of the softmax distribution over a batch of pre-softmax logits."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
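# Worked check (illustrative, not executed at import time): entropy(x) equals
# the Shannon entropy of softmax(x). With A = sum_i exp(x_i),
# B = sum_i x_i * exp(x_i) and p_i = exp(x_i) / A:
#     -sum_i p_i * log(p_i) = log(A) - B / A
# which is exactly what the function returns. A quick torch sanity test:
#     x = torch.randn(4, 10)
#     p = torch.softmax(x, dim=1)
#     assert torch.allclose(entropy(x), -(p * p.log()).sum(dim=1), atol=1e-5)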
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]
    def set_early_exit_entropy(self, x):
        # Accept either a single threshold for all layers or one per layer.
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x
    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])
    def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prunes heads of the model."""
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]

        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message  # the outputs carried out of the early-exiting layer
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    """A module that provides a shortcut from an intermediate BertLayer's output to the classification head."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    #  We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 717 |
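# Minimal inference-time sketch for the classes above (illustrative: the 0.3
# threshold is an assumption, and `model` is expected to be a trained
# DeeBertForSequenceClassification with a tokenized `encoded_batch`):
import torch

def deebert_early_exit_predict(model, encoded_batch, threshold=0.3):
    # One entropy threshold for every layer; a layer whose highway head is
    # confident enough (entropy below threshold) raises HighwayException inside
    # the encoder, which DeeBertForSequenceClassification.forward catches.
    model.bert.encoder.set_early_exit_entropy(threshold)
    model.eval()
    with torch.no_grad():
        outputs = model(**encoded_batch)  # no labels -> outputs[0] is logits
    return outputs[0].argmax(dim=-1)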
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument("--pretrained_model_config", type=str, default="roberta-base", help="The model config to use. Note that we don't copy the model's weights, only the config!")
    parser.add_argument("--tokenizer", type=str, default="unigram-tokenizer-wikitext", help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.")
    parser.add_argument("--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.")
    parser.add_argument("--no_tpu", action="store_true", help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.")
    parser.add_argument("--tpu_name", type=str, help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.", default="local")
    parser.add_argument("--tpu_zone", type=str, help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.")
    parser.add_argument("--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes.")
    parser.add_argument("--bfloat16", action="store_true", help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.")
    parser.add_argument("--train_dataset", type=str, help="Path to training dataset to load. If the path begins with `gs://` then the dataset will be loaded from a Google Cloud Storage bucket.")
    parser.add_argument("--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)")
    parser.add_argument("--eval_dataset", type=str, help="Path to evaluation dataset to load. If the path begins with `gs://` then the dataset will be loaded from a Google Cloud Storage bucket.")
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of epochs to train for.")
    parser.add_argument("--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.")
    parser.add_argument("--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.")
    parser.add_argument("--max_length", type=int, default=512, help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py")
    parser.add_argument("--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.")
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu
def count_samples(file_list):
    # Shard file names encode their sample count, e.g. "dataset-0-1000.tfrecord".
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])
    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)
    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=special_tokens_mask, )
        return batch
    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size, )

    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False, )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks, )

    model.save_pretrained(args.output_dir)


if __name__ == "__main__":
    args = parse_args()
    main(args)
| 687 | 0 |
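# Companion sketch for the pipeline above: the record layout decode_fn expects
# (illustrative; the shard name and max_length are assumptions). Note the file
# name encodes its sample count so that count_samples' regex can read it back.
import tensorflow as tf

def write_dummy_shard(path="dummy-0-1.tfrecord", max_length=512):
    with tf.io.TFRecordWriter(path) as writer:
        feature = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=[1] * max_length)),
            "attention_mask": tf.train.Feature(int64_list=tf.train.Int64List(value=[1] * max_length)),
        }
        example = tf.train.Example(features=tf.train.Features(feature=feature))
        writer.write(example.SerializeToString())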
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(self, vocab_size=30522, block_sizes=[4, 4, 4], block_repeats=None, num_decoder_layers=2, d_model=768, n_head=12, d_head=64, d_inner=3072, hidden_act="gelu_new", hidden_dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, initializer_range=0.1, initializer_std=None, layer_norm_eps=1e-9, pooling_type="mean", attention_type="relative_shift", separate_cls=True, truncate_seq=True, pool_q_only=True, **kwargs):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
| 468 |
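# Quick illustration of the derived properties above (values follow from the
# defaults; no pretrained weights involved):
config = FunnelConfig()            # block_sizes defaults to [4, 4, 4]
print(config.num_blocks)           # 3  == len(block_sizes)
print(config.num_hidden_layers)    # 12 == sum(block_sizes)
print(config.num_attention_heads)  # 12 == n_head, resolved via attribute_map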
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self :Any):
"""simple docstring"""
_lowercase =0
def UpperCamelCase__ ( self :Optional[int]):
"""simple docstring"""
_lowercase =AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32')
self.assertIsInstance(snake_case, snake_case)
def UpperCamelCase__ ( self :Union[str, Any]):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase =Path(snake_case) / 'preprocessor_config.json'
_lowercase =Path(snake_case) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'}, open(snake_case, 'w'), )
json.dump({'model_type': 'clip'}, open(snake_case, 'w'))
_lowercase =AutoImageProcessor.from_pretrained(snake_case)
self.assertIsInstance(snake_case, snake_case)
def UpperCamelCase__ ( self :Union[str, Any]):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase =Path(snake_case) / 'preprocessor_config.json'
_lowercase =Path(snake_case) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'}, open(snake_case, 'w'), )
json.dump({'model_type': 'clip'}, open(snake_case, 'w'))
_lowercase =AutoImageProcessor.from_pretrained(snake_case)
self.assertIsInstance(snake_case, snake_case)
def UpperCamelCase__ ( self :Union[str, Any]):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase =CLIPConfig()
            # Create a dummy config file with image_processor_type
_lowercase =Path(snake_case) / 'preprocessor_config.json'
_lowercase =Path(snake_case) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'}, open(snake_case, 'w'), )
json.dump({'model_type': 'clip'}, open(snake_case, 'w'))
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
_lowercase =AutoImageProcessor.from_pretrained(snake_case).to_dict()
config_dict.pop('image_processor_type')
_lowercase =CLIPImageProcessor(**snake_case)
# save in new folder
model_config.save_pretrained(snake_case)
config.save_pretrained(snake_case)
_lowercase =AutoImageProcessor.from_pretrained(snake_case)
# make sure private variable is not incorrectly saved
_lowercase =json.loads(config.to_json_string())
self.assertTrue('_processor_class' not in dict_as_saved)
self.assertIsInstance(snake_case, snake_case)
def UpperCamelCase__ ( self :Optional[int]):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase =Path(snake_case) / 'preprocessor_config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'}, open(snake_case, 'w'), )
_lowercase =AutoImageProcessor.from_pretrained(snake_case)
self.assertIsInstance(snake_case, snake_case)
def UpperCamelCase__ ( self :int):
"""simple docstring"""
with self.assertRaisesRegex(
snake_case, 'clip-base is not a local folder and is not a valid model identifier'):
_lowercase =AutoImageProcessor.from_pretrained('clip-base')
def UpperCamelCase__ ( self :Dict):
"""simple docstring"""
with self.assertRaisesRegex(
snake_case, r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
_lowercase =AutoImageProcessor.from_pretrained(snake_case, revision='aaaaaa')
def UpperCamelCase__ ( self :List[Any]):
"""simple docstring"""
with self.assertRaisesRegex(
snake_case, 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.', ):
_lowercase =AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model')
def UpperCamelCase__ ( self :Optional[Any]):
"""simple docstring"""
with self.assertRaises(snake_case):
_lowercase =AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor')
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case):
_lowercase =AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=snake_case)
_lowercase =AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=snake_case)
self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(snake_case)
_lowercase =AutoImageProcessor.from_pretrained(snake_case, trust_remote_code=snake_case)
self.assertEqual(reloaded_image_processor.__class__.__name__, 'NewImageProcessor')
def UpperCamelCase__ ( self :Optional[Any]):
"""simple docstring"""
try:
AutoConfig.register('custom', snake_case)
AutoImageProcessor.register(snake_case, snake_case)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case):
AutoImageProcessor.register(snake_case, snake_case)
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase =Path(snake_case) / 'preprocessor_config.json'
_lowercase =Path(snake_case) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'}, open(snake_case, 'w'), )
json.dump({'model_type': 'clip'}, open(snake_case, 'w'))
_lowercase =CustomImageProcessor.from_pretrained(snake_case)
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(snake_case)
_lowercase =AutoImageProcessor.from_pretrained(snake_case)
self.assertIsInstance(snake_case, snake_case)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase__ ( self :str):
"""simple docstring"""
class SCREAMING_SNAKE_CASE_ ( _a ):
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] =True
try:
AutoConfig.register('custom', snake_case)
AutoImageProcessor.register(snake_case, snake_case)
# If remote code is not set, the default is to use local
_lowercase =AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor')
self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
self.assertTrue(image_processor.is_local)
# If remote code is disabled, we load the local one.
_lowercase =AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=snake_case)
self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
self.assertTrue(image_processor.is_local)
# If remote is enabled, we load from the Hub
_lowercase =AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=snake_case)
self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
self.assertTrue(not hasattr(snake_case, 'is_local'))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 181 | 0 |
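# The registration pattern the tests above exercise, reduced to its core
# (illustrative; MyConfig/MyImageProcessor are hypothetical stand-ins for a
# user-defined pair, like the test_module classes imported at the top):
from transformers import AutoConfig, AutoImageProcessor, PretrainedConfig
from transformers.image_processing_utils import BaseImageProcessor

class MyConfig(PretrainedConfig):
    model_type = "my-model"

class MyImageProcessor(BaseImageProcessor):
    pass

AutoConfig.register("my-model", MyConfig)
AutoImageProcessor.register(MyConfig, MyImageProcessor)
# AutoImageProcessor.from_pretrained now resolves any checkpoint whose config
# declares `"model_type": "my-model"` to MyImageProcessor.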
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 139 |
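# The idea behind the _LazyModule indirection above, reduced to plain PEP 562
# (illustrative sketch, not transformers' actual implementation): a name is
# only imported from its submodule on first attribute access.
import importlib

_import_structure = {"configuration_swiftformer": ["SwiftFormerConfig"]}

def __getattr__(name):
    for module_name, exports in _import_structure.items():
        if name in exports:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")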
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )

        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 139 | 1 |
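# Worked example for distribute_coins above (illustrative): a root holding all
# three coins must pass one coin to each empty leaf, i.e. two moves.
#
#        3
#       / \
#      0   0
root = TreeNode(3, TreeNode(0), TreeNode(0))
assert distribute_coins(root) == 2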
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Greedy longest-match-first tokenization of a single pre-tokenized word."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    """Construct a CPM-Ant tokenizer based on jieba pre-tokenization and WordPiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(self, vocab_file, bod_token="<d>", eod_token="</d>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", line_token="</n>", space_token="</_>", padding_side="left", **kwargs):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, unk_token=unk_token, line_token=line_token, space_token=space_token, padding_side=padding_side, **kwargs, )

        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def _tokenize(self, text):
        """Tokenize a string: jieba pre-tokenization followed by WordPiece."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping padding and bos/eos markers."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
| 85 |
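# Greedy longest-match-first behaviour of WordpieceTokenizer above on a toy
# vocabulary (illustrative; a real CPM-Ant vocabulary comes from vocab.txt):
toy_vocab = {"un": 0, "aff": 1, "able": 2, "unaff": 3}
wp = WordpieceTokenizer(vocab=toy_vocab, unk_token="<unk>")
print(wp.tokenize("unaffable"))  # ['unaff', 'able'] -- the longest prefix wins
print(wp.tokenize("zq"))         # ['<unk>', '<unk>'] -- unmatched chars become unk_token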
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, min_depth=8, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.999, initializer_range=0.02, layer_norm_eps=0.001, **kwargs):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 624 | 0 |
"""simple docstring"""
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def _UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def _UpperCamelCase ( ) -> str:
"""simple docstring"""
_UpperCAmelCase = """mock-s3-bucket"""
_UpperCAmelCase = F"""s3://{mock_bucket}"""
_UpperCAmelCase = extract_path_from_uri(_UpperCAmelCase )
assert dataset_path.startswith("""s3://""" ) is False
_UpperCAmelCase = """./local/path"""
_UpperCAmelCase = extract_path_from_uri(_UpperCAmelCase )
assert dataset_path == new_dataset_path
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
_UpperCAmelCase = is_remote_filesystem(_UpperCAmelCase )
assert is_remote is True
_UpperCAmelCase = fsspec.filesystem("""file""" )
_UpperCAmelCase = is_remote_filesystem(_UpperCAmelCase )
assert is_remote is False
@pytest.mark.parametrize("""compression_fs_class""" , _UpperCAmelCase )
def _UpperCamelCase ( _A , _A , _A , _A , _A , _A , _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_file, """bz2""": bza_file, """lz4""": lza_file}
_UpperCAmelCase = input_paths[compression_fs_class.protocol]
if input_path is None:
_UpperCAmelCase = F"""for \'{compression_fs_class.protocol}\' compression protocol, """
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_UpperCAmelCase )
_UpperCAmelCase = fsspec.filesystem(compression_fs_class.protocol , fo=_UpperCAmelCase )
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase = os.path.basename(_UpperCAmelCase )
_UpperCAmelCase = expected_filename[: expected_filename.rindex(""".""" )]
assert fs.glob("""*""" ) == [expected_filename]
with fs.open(_UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f, open(_UpperCAmelCase , encoding="""utf-8""" ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("""protocol""" , ["""zip""", """gzip"""] )
def _UpperCamelCase ( _A , _A , _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = {"""zip""": zip_jsonl_path, """gzip""": jsonl_gz_path}
_UpperCAmelCase = compressed_file_paths[protocol]
_UpperCAmelCase = """dataset.jsonl"""
_UpperCAmelCase = F"""{protocol}://{member_file_path}::{compressed_file_path}"""
_UpperCAmelCase ,*_UpperCAmelCase = fsspec.get_fs_token_paths(_UpperCAmelCase )
assert fs.isfile(_UpperCAmelCase )
assert not fs.isfile("""non_existing_""" + member_file_path )
@pytest.mark.integration
def _UpperCamelCase ( _A , _A , _A , _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = hf_api.dataset_info(_UpperCAmelCase , token=_UpperCAmelCase )
_UpperCAmelCase = HfFileSystem(repo_info=_UpperCAmelCase , token=_UpperCAmelCase )
assert sorted(hffs.glob("""*""" ) ) == [".gitattributes", "data"]
assert hffs.isdir("""data""" )
assert hffs.isfile(""".gitattributes""" ) and hffs.isfile("""data/text_data.txt""" )
with open(_UpperCAmelCase ) as f:
assert hffs.open("""data/text_data.txt""" , """r""" ).read() == f.read()
def _UpperCamelCase ( ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = """bz2"""
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(_UpperCAmelCase , _UpperCAmelCase , clobber=_UpperCAmelCase )
with pytest.warns(_UpperCAmelCase ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(_UpperCAmelCase ) == 1
assert (
str(warning_info[0].message )
== F"""A filesystem protocol was already set for {protocol} and will be overwritten."""
)
| 715 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}
    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column
    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)
    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 19 | 0 |
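# The dtype behaviour _tensorize above pins down, in isolation (illustrative):
# with jax's default x64-disabled config, jnp.array silently downcasts 64-bit
# NumPy input, so the formatter passes an explicit int32/float32 dtype.
import numpy as np
import jax.numpy as jnp

x = np.arange(3, dtype=np.int64)
print(jnp.array(x).dtype)                   # int32 under the default config
print(jnp.array(x, dtype=jnp.int32).dtype)  # int32, now stated explicitly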
"""simple docstring"""
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 224 |
"""simple docstring"""
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False):
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf

        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the node prefix and a word.

        Returns (common substring, remaining prefix, remaining word).
        """
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                    return True

    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
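
# Illustrative run (assumes the corrected RadixNode above): shared prefixes are
# compressed into single edges, so these three words need only five nodes
# beneath the root.
demo_root = RadixNode()
demo_root.insert_many(["romane", "romanus", "romulus"])
demo_root.print_tree()
# Expected shape:
# - rom
# -- an
# --- e  (leaf)
# --- us  (leaf)
# -- ulus  (leaf)
print(demo_root.find("romanus"))  # True
print(demo_root.find("roman"))    # False: "roman" was never inserted as a word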
| 224 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    """Read a label file into a {line_number: first_word} mapping."""
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def __UpperCamelCase ( a : Tuple , a : Tuple , a : Optional[int] , a : int , a : str ) ->Any:
for attribute in key.split('''.''' ):
snake_case = getattr(a , a )
snake_case = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(a ):
snake_case = PARAM_MAPPING[full_name.split('''.''' )[-1]]
snake_case = '''param'''
if weight_type is not None and weight_type != "param":
snake_case = getattr(a , a ).shape
elif weight_type is not None and weight_type == "param":
snake_case = hf_pointer
for attribute in hf_param_name.split('''.''' ):
snake_case = getattr(a , a )
snake_case = shape_pointer.shape
# let's reduce dimension
snake_case = value[0]
else:
snake_case = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
snake_case = value
elif weight_type == "weight_g":
snake_case = value
elif weight_type == "weight_v":
snake_case = value
elif weight_type == "bias":
snake_case = value
elif weight_type == "param":
for attribute in hf_param_name.split('''.''' ):
snake_case = getattr(a , a )
snake_case = value
else:
snake_case = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __UpperCamelCase ( a : Optional[int] , a : Optional[Any] , a : Optional[int] , a : Any , a : int ) ->Dict:
snake_case = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(a ):
snake_case = PARAM_MAPPING[full_name.split('''.''' )[-1]]
snake_case = '''param'''
if weight_type is not None and weight_type != "param":
snake_case = '''.'''.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
snake_case = '''.'''.join([key, hf_param_name] )
else:
snake_case = key
snake_case = value if '''lm_head''' in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def __UpperCamelCase ( a : List[Any] , a : Optional[int] , a : Optional[Any]=None , a : Any=None ) ->Any:
snake_case = False
for key, mapped_key in MAPPING.items():
snake_case = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
snake_case = True
if "*" in mapped_key:
snake_case = name.split(a )[0].split('''.''' )[-2]
snake_case = mapped_key.replace('''*''' , a )
if "weight_g" in name:
snake_case = '''weight_g'''
elif "weight_v" in name:
snake_case = '''weight_v'''
elif "bias" in name:
snake_case = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case = '''weight'''
else:
snake_case = None
if hf_dict is not None:
rename_dict(a , a , a , a , a )
else:
set_recursively(a , a , a , a , a )
return is_used
return is_used
def __UpperCamelCase ( a : List[Any] , a : Union[str, Any] , a : str ) ->Any:
snake_case = []
snake_case = fairseq_model.state_dict()
snake_case = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
snake_case = False
if "conv_layers" in name:
load_conv_layer(
a , a , a , a , hf_model.config.feat_extract_norm == '''group''' , )
snake_case = True
else:
snake_case = load_wavaveca_layer(a , a , a )
if not is_used:
unused_weights.append(a )
logger.warning(f"""Unused weights: {unused_weights}""" )
def __UpperCamelCase ( a : Union[str, Any] , a : List[str] , a : Union[str, Any] , a : Dict , a : List[str] ) ->Tuple:
snake_case = full_name.split('''conv_layers.''' )[-1]
snake_case = name.split('''.''' )
snake_case = int(items[0] )
snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
snake_case = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
snake_case = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
snake_case = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
snake_case = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(a )
@torch.no_grad()
def __UpperCamelCase ( a : Tuple , a : Tuple , a : List[str]=None , a : Tuple=None , a : str=True , a : Tuple=False ) ->Dict:
if config_path is not None:
snake_case = WavaVecaConfig.from_pretrained(a )
else:
snake_case = WavaVecaConfig()
if is_seq_class:
snake_case = read_txt_into_dict(a )
snake_case = idalabel
snake_case = WavaVecaForSequenceClassification(a )
snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=a , return_attention_mask=a , )
feature_extractor.save_pretrained(a )
elif is_finetuned:
if dict_path:
snake_case = Dictionary.load(a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
snake_case = target_dict.pad_index
snake_case = target_dict.bos_index
snake_case = target_dict.eos_index
snake_case = len(target_dict.symbols )
snake_case = os.path.join(a , '''vocab.json''' )
if not os.path.isdir(a ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(a ) )
return
os.makedirs(a , exist_ok=a )
snake_case = target_dict.indices
# fairseq has the <pad> and <s> switched
snake_case = 0
snake_case = 1
with open(a , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(a , a )
snake_case = WavaVecaCTCTokenizer(
a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=a , )
snake_case = True if config.feat_extract_norm == '''layer''' else False
snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=a , return_attention_mask=a , )
snake_case = WavaVecaProcessor(feature_extractor=a , tokenizer=a )
processor.save_pretrained(a )
snake_case = WavaVecaForCTC(a )
else:
snake_case = WavaVecaForPreTraining(a )
if is_finetuned or is_seq_class:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
snake_case = argparse.Namespace(task='''audio_pretraining''' )
snake_case = fairseq.tasks.setup_task(a )
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=a )
snake_case = model[0].eval()
recursively_load_weights(a , a , not is_finetuned )
hf_wavavec.save_pretrained(a )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
_lowercase = parser.parse_args()
_lowercase = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
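
# Hedged usage sketch: after the converter above runs for a fine-tuned CTC
# checkpoint, the dump folder is a regular transformers checkpoint. Canonical
# class names are used here (the snippet's `WavaVeca*` aliases are obfuscated);
# the path is a placeholder.
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

model = Wav2Vec2ForCTC.from_pretrained("path/to/pytorch_dump_folder")
processor = Wav2Vec2Processor.from_pretrained("path/to/pytorch_dump_folder")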
| 44 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class _lowercase :
@staticmethod
def UpperCamelCase ( *A__ , **A__ ) -> List[Any]:
pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _lowercase ( unittest.TestCase ):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def UpperCamelCase ( self , A__ , A__ , A__ ) -> Union[str, Any]:
snake_case = DepthEstimationPipeline(model=A__ , image_processor=A__ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCamelCase ( self , A__ , A__ ) -> List[Any]:
snake_case = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , A__ )
import datasets
snake_case = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
snake_case = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
] , A__ , )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def UpperCamelCase ( self ) -> Optional[Any]:
pass
@slow
@require_torch
def UpperCamelCase ( self ) -> Dict:
snake_case = '''Intel/dpt-large'''
snake_case = pipeline('''depth-estimation''' , model=A__ )
snake_case = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
snake_case = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 2_9.3_0_4 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.6_6_2 )
@require_torch
def UpperCamelCase ( self ) -> Any:
# This is highly irregular to have no small tests.
self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
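
# Hedged minimal usage of the pipeline exercised by the slow test above; it
# reuses the same model id and image URL as the test itself.
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
print(outputs["predicted_depth"].shape)  # torch.Tensor depth map
outputs["depth"].save("depth.png")       # PIL.Image visualization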
| 44 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __UpperCAmelCase ( unittest.TestCase ):
def UpperCAmelCase ( self : Any ) -> Dict:
'''simple docstring'''
a__ : Dict = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
a__ : Any = dict(zip(a_ , range(len(a_ ) ) ) )
a__ : int = {
"unk_token": "<unk>",
"bos_token": "<s>",
"eos_token": "</s>",
}
a__ : Optional[Any] = {
"feature_size": 1,
"padding_value": 0.0,
"sampling_rate": 1_60_00,
"return_attention_mask": False,
"do_normalize": True,
}
a__ : Optional[int] = tempfile.mkdtemp()
a__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
a__ : Tuple = os.path.join(self.tmpdirname , a_ )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(a_ ) + "\n" )
with open(self.feature_extraction_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(a_ ) + "\n" )
# load decoder from hub
a__ : Tuple = "hf-internal-testing/ngram-beam-search-decoder"
def UpperCAmelCase ( self : List[Any] , **a_ : Dict ) -> List[str]:
'''simple docstring'''
a__ : Optional[int] = self.add_kwargs_tokens_map.copy()
kwargs.update(a_ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **a_ )
def UpperCAmelCase ( self : Dict , **a_ : str ) -> Optional[int]:
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **a_ )
def UpperCAmelCase ( self : Any , **a_ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **a_ )
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self : List[str] ) -> List[Any]:
'''simple docstring'''
a__ : str = self.get_tokenizer()
a__ : str = self.get_feature_extractor()
a__ : Optional[Any] = self.get_decoder()
a__ : List[Any] = WavaVecaProcessorWithLM(tokenizer=a_ , feature_extractor=a_ , decoder=a_ )
processor.save_pretrained(self.tmpdirname )
a__ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , a_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , a_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , a_ )
def UpperCAmelCase ( self : Tuple ) -> List[str]:
'''simple docstring'''
a__ : Tuple = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
a__ : Dict = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def UpperCAmelCase ( self : List[str] ) -> Any:
'''simple docstring'''
a__ : Any = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["xx"] )
with self.assertRaisesRegex(a_ , "include" ):
WavaVecaProcessorWithLM(
tokenizer=a_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def UpperCAmelCase ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
a__ : int = self.get_feature_extractor()
a__ : List[Any] = self.get_tokenizer()
a__ : Union[str, Any] = self.get_decoder()
a__ : str = WavaVecaProcessorWithLM(tokenizer=a_ , feature_extractor=a_ , decoder=a_ )
a__ : Dict = floats_list((3, 10_00) )
a__ : List[Any] = feature_extractor(a_ , return_tensors="np" )
a__ : Optional[Any] = processor(a_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase ( self : str ) -> Optional[int]:
'''simple docstring'''
a__ : Optional[Any] = self.get_feature_extractor()
a__ : Optional[Any] = self.get_tokenizer()
a__ : Any = self.get_decoder()
a__ : str = WavaVecaProcessorWithLM(tokenizer=a_ , feature_extractor=a_ , decoder=a_ )
a__ : Optional[int] = "This is a test string"
a__ : Union[str, Any] = processor(text=a_ )
a__ : List[Any] = tokenizer(a_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase ( self : Dict , a_ : Tuple=(2, 10, 16) , a_ : Optional[int]=77 ) -> Any:
'''simple docstring'''
np.random.seed(a_ )
return np.random.rand(*a_ )
def UpperCAmelCase ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
a__ : int = self.get_feature_extractor()
a__ : Dict = self.get_tokenizer()
a__ : List[str] = self.get_decoder()
a__ : List[Any] = WavaVecaProcessorWithLM(tokenizer=a_ , feature_extractor=a_ , decoder=a_ )
a__ : List[Any] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
a__ : Optional[int] = processor.decode(a_ )
a__ : Optional[int] = decoder.decode_beams(a_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("</s> <s> </s>" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["fork"], ["spawn"]] )
def UpperCAmelCase ( self : Dict , a_ : Dict ) -> Optional[Any]:
'''simple docstring'''
a__ : Any = self.get_feature_extractor()
a__ : List[str] = self.get_tokenizer()
a__ : Dict = self.get_decoder()
a__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=a_ , feature_extractor=a_ , decoder=a_ )
a__ : List[Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
a__ : Optional[Any] = processor.batch_decode(a_ )
else:
with get_context(a_ ).Pool() as pool:
a__ : int = processor.batch_decode(a_ , a_ )
a__ : Optional[Any] = list(a_ )
with get_context("fork" ).Pool() as p:
a__ : List[Any] = decoder.decode_beams_batch(a_ , a_ )
a__ , a__ , a__ : str = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(a_ , decoded_processor.text )
self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text )
self.assertListEqual(a_ , decoded_processor.logit_score )
self.assertListEqual(a_ , decoded_processor.lm_score )
def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[int] = self.get_feature_extractor()
a__ : Union[str, Any] = self.get_tokenizer()
a__ : int = self.get_decoder()
a__ : List[str] = WavaVecaProcessorWithLM(tokenizer=a_ , feature_extractor=a_ , decoder=a_ )
a__ : Optional[int] = self._get_dummy_logits()
a__ : Dict = 15
a__ : Optional[int] = -20.0
a__ : Optional[int] = -4.0
a__ : Union[str, Any] = processor.batch_decode(
a_ , beam_width=a_ , beam_prune_logp=a_ , token_min_logp=a_ , )
a__ : Any = decoded_processor_out.text
a__ : Optional[Any] = list(a_ )
with get_context("fork" ).Pool() as pool:
a__ : Union[str, Any] = decoder.decode_beams_batch(
a_ , a_ , beam_width=a_ , beam_prune_logp=a_ , token_min_logp=a_ , )
a__ : Optional[int] = [d[0][0] for d in decoded_decoder_out]
a__ : Tuple = [d[0][2] for d in decoded_decoder_out]
a__ : Union[str, Any] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(a_ , a_ )
self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , a_ )
self.assertTrue(np.array_equal(a_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , a_ , atol=1E-3 ) )
self.assertTrue(np.array_equal(a_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , a_ , atol=1E-3 ) )
def UpperCAmelCase ( self : List[Any] ) -> Dict:
'''simple docstring'''
a__ : Tuple = self.get_feature_extractor()
a__ : List[Any] = self.get_tokenizer()
a__ : Any = self.get_decoder()
a__ : int = WavaVecaProcessorWithLM(tokenizer=a_ , feature_extractor=a_ , decoder=a_ )
a__ : Tuple = self._get_dummy_logits()
a__ : str = 2.0
a__ : Optional[int] = 5.0
a__ : Optional[int] = -20.0
a__ : Any = True
a__ : List[str] = processor.batch_decode(
a_ , alpha=a_ , beta=a_ , unk_score_offset=a_ , lm_score_boundary=a_ , )
a__ : List[Any] = decoded_processor_out.text
a__ : Tuple = list(a_ )
decoder.reset_params(
alpha=a_ , beta=a_ , unk_score_offset=a_ , lm_score_boundary=a_ , )
with get_context("fork" ).Pool() as pool:
a__ : Optional[int] = decoder.decode_beams_batch(
a_ , a_ , )
a__ : int = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(a_ , a_ )
self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] , a_ )
a__ : int = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , a_ )
def UpperCAmelCase ( self : Dict ) -> str:
'''simple docstring'''
a__ : List[Any] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
a__ : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
a__ : Dict = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
a__ : List[Any] = os.listdir(a_ )
a__ : Optional[int] = ["alphabet.json", "language_model"]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(a_ , a_ )
def UpperCAmelCase ( self : int ) -> List[str]:
'''simple docstring'''
a__ : Optional[int] = snapshot_download("hf-internal-testing/processor_with_lm" )
a__ : str = WavaVecaProcessorWithLM.from_pretrained(a_ )
a__ : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
a__ : List[str] = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
a__ : List[Any] = os.listdir(a_ )
a__ : Any = os.listdir(a_ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(a_ , a_ )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
'''simple docstring'''
a__ : List[Any] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
a__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" )
a__ : List[str] = floats_list((3, 10_00) )
a__ : Tuple = processor_wavaveca(a_ , return_tensors="np" )
a__ : Any = processor_auto(a_ , return_tensors="np" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
a__ : Optional[int] = self._get_dummy_logits()
a__ : Optional[Any] = processor_wavaveca.batch_decode(a_ )
a__ : int = processor_auto.batch_decode(a_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
a__ : List[str] = self.get_feature_extractor()
a__ : Optional[Any] = self.get_tokenizer()
a__ : List[Any] = self.get_decoder()
a__ : Any = WavaVecaProcessorWithLM(tokenizer=a_ , feature_extractor=a_ , decoder=a_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
@staticmethod
def UpperCAmelCase ( a_ : Dict , a_ : Any ) -> Dict:
'''simple docstring'''
a__ : Tuple = [d[key] for d in offsets]
return retrieved_list
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
'''simple docstring'''
a__ : str = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
a__ : Dict = self._get_dummy_logits()[0]
a__ : Union[str, Any] = processor.decode(a_ , output_word_offsets=a_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(a_ , a_ ) )
self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] , "word" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "end_offset" ) , [1, 3, 5] )
def UpperCAmelCase ( self : str ) -> Dict:
'''simple docstring'''
a__ : Tuple = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
a__ : Union[str, Any] = self._get_dummy_logits()
a__ : Union[str, Any] = processor.batch_decode(a_ , output_word_offsets=a_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(a_ , a_ ) )
self.assertListEqual(
[" ".join(self.get_from_offsets(a_ , "word" ) ) for o in outputs["word_offsets"]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "end_offset" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
import torch
a__ : List[Any] = load_dataset("common_voice" , "en" , split="train" , streaming=a_ )
a__ : Optional[int] = ds.cast_column("audio" , datasets.Audio(sampling_rate=1_60_00 ) )
a__ : Optional[int] = iter(a_ )
a__ : Optional[int] = next(a_ )
a__ : Any = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
a__ : Optional[Any] = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
a__ : int = processor(sample["audio"]["array"] , return_tensors="pt" ).input_values
with torch.no_grad():
a__ : int = model(a_ ).logits.cpu().numpy()
a__ : str = processor.decode(logits[0] , output_word_offsets=a_ )
a__ : Optional[Any] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
a__ : Optional[Any] = [
{
"start_time": d["start_offset"] * time_offset,
"end_time": d["end_offset"] * time_offset,
"word": d["word"],
}
for d in output["word_offsets"]
]
a__ : List[str] = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
# output words
self.assertEqual(" ".join(self.get_from_offsets(a_ , "word" ) ) , a_ )
self.assertEqual(" ".join(self.get_from_offsets(a_ , "word" ) ) , output.text )
# output times
a__ : Optional[Any] = torch.tensor(self.get_from_offsets(a_ , "start_time" ) )
a__ : str = torch.tensor(self.get_from_offsets(a_ , "end_time" ) )
# fmt: off
a__ : Optional[int] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
a__ : Optional[Any] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(a_ , a_ , atol=0.01 ) )
self.assertTrue(torch.allclose(a_ , a_ , atol=0.01 ) )
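
# Hedged sketch of the decode path these tests exercise, written with the
# canonical class name Wav2Vec2ProcessorWithLM (the snippet's aliases are
# obfuscated). Random logits are used, so the decoded text is meaningless.
import numpy as np
from multiprocessing import get_context
from transformers import Wav2Vec2ProcessorWithLM

processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
logits = np.random.rand(2, 10, 16)  # (batch, time, vocab) dummy CTC logits
with get_context("fork").Pool() as pool:  # pool must be created after the processor
    decoded = processor.batch_decode(logits, pool)
print(decoded.text)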
| 642 |
"""simple docstring"""
def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is greater than or equal to the length of the pattern
        # string, that index is the starting position of a substring equal to
        # the pattern string
        if val >= len(pattern):
            answer += 1

    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
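
# Worked example (assumes the corrected functions above). z[i] is the length
# of the longest common prefix of s and s[i:]; z[0] is left at 0 by convention.
print(z_function("aaabaab"))          # [0, 2, 1, 0, 2, 1, 0]
print(find_pattern("aab", "baabaa"))  # 1 -- "aab" occurs once, at index 1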
| 642 | 1 |
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(
            [comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True
        )
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
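
# Hedged dry-run sketch: the same staleness window as main() above, but it
# only prints candidates instead of closing or commenting. Label exemptions
# are omitted for brevity; GITHUB_TOKEN is assumed to be set.
def dry_run():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    for issue in repo.get_issues(state="open"):
        if (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30:
            print(f"stale candidate: #{issue.number} {issue.title}")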
| 703 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
a__ : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
a__ : Dict = parser.parse_args()
a__ : Tuple = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
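
# Hedged usage sketch: a typical invocation of the script above (the script
# filename and model choices are placeholders), plus a helper that loads the
# consolidated checkpoint back with the matching RAG classes.
#
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_token \
#       --generator_name_or_path facebook/bart-large \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-consolidated
#
def load_consolidated_checkpoint(dest_dir: str = "./rag-consolidated"):
    from transformers import RagTokenForGeneration, RagTokenizer

    model = RagTokenForGeneration.from_pretrained(dest_dir)
    tokenizer = RagTokenizer.from_pretrained(dest_dir)
    return model, tokenizer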
| 333 | 0 |
"""simple docstring"""
import numpy as np
def runge_kutta(f, y0, x0, h, x_end):
    """Classic fourth-order Runge-Kutta integration of y' = f(x, y)."""
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
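
# Worked check (assumes the corrected runge_kutta above): integrating y' = y
# with y(0) = 1 up to x = 1 should land near e = 2.71828..., since RK4's
# global error is O(h^4) (roughly 1e-8 for h = 0.01 here).
def exp_ode(x, y):
    return y

approx = runge_kutta(exp_ode, 1.0, 0.0, 0.01, 1.0)[-1]
print(approx, abs(approx - np.e))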
| 499 |
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several datasets (sources) into a single dataset."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several datasets along the rows (axis=0) or columns (axis=1)."""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
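
# Hedged usage sketch of the public entry point defined above, as exposed from
# the top-level `datasets` package.
from datasets import Dataset, interleave_datasets

d1 = Dataset.from_dict({"a": [0, 1, 2]})
d2 = Dataset.from_dict({"a": [10, 11, 12]})
mixed = interleave_datasets([d1, d2], probabilities=[0.8, 0.2], seed=42)
print(mixed["a"])  # a seeded 80/20 sample, stopping when one source is exhausted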
| 499 | 1 |
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float tensor of the given shape as nested lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class snake_case (unittest.TestCase ):
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=7 ,UpperCAmelCase_=400 ,UpperCAmelCase_=2_000 ,UpperCAmelCase_=10 ,UpperCAmelCase_=160 ,UpperCAmelCase_=8 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=4_000 ,UpperCAmelCase_=False ,UpperCAmelCase_=True ,) -> str:
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = min_seq_length
lowercase__ = max_seq_length
lowercase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowercase__ = padding_value
lowercase__ = sampling_rate
lowercase__ = return_attention_mask
lowercase__ = do_normalize
lowercase__ = feature_size
lowercase__ = chunk_length
lowercase__ = hop_length
def _a ( self ) -> Union[str, Any]:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _a ( self ,UpperCAmelCase_=False ,UpperCAmelCase_=False ) -> Optional[Any]:
def _flatten(UpperCAmelCase_ ):
return list(itertools.chain(*UpperCAmelCase_ ) )
if equal_length:
lowercase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowercase__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff )
]
if numpify:
lowercase__ = [np.asarray(UpperCAmelCase_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class snake_case (UpperCamelCase , unittest.TestCase ):
lowerCAmelCase__ :Tuple = WhisperFeatureExtractor if is_speech_available() else None
def _a ( self ) -> int:
lowercase__ = WhisperFeatureExtractionTester(self )
def _a ( self ) -> Union[str, Any]:
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = feat_extract_first.save_pretrained(UpperCAmelCase_ )[0]
check_json_file_has_correct_format(UpperCAmelCase_ )
lowercase__ = self.feature_extraction_class.from_pretrained(UpperCAmelCase_ )
lowercase__ = feat_extract_first.to_dict()
lowercase__ = feat_extract_second.to_dict()
lowercase__ = feat_extract_first.mel_filters
lowercase__ = feat_extract_second.mel_filters
self.assertTrue(np.allclose(UpperCAmelCase_ ,UpperCAmelCase_ ) )
self.assertEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
def _a ( self ) -> List[Any]:
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = os.path.join(UpperCAmelCase_ ,"feat_extract.json" )
feat_extract_first.to_json_file(UpperCAmelCase_ )
lowercase__ = self.feature_extraction_class.from_json_file(UpperCAmelCase_ )
lowercase__ = feat_extract_first.to_dict()
lowercase__ = feat_extract_second.to_dict()
lowercase__ = feat_extract_first.mel_filters
lowercase__ = feat_extract_second.mel_filters
self.assertTrue(np.allclose(UpperCAmelCase_ ,UpperCAmelCase_ ) )
self.assertEqual(UpperCAmelCase_ ,UpperCAmelCase_ )
def _a ( self ) -> Optional[Any]:
# Tests that all call wrap to encode_plus and batch_encode_plus
lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowercase__ = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
lowercase__ = [np.asarray(UpperCAmelCase_ ) for speech_input in speech_inputs]
# Test feature size
lowercase__ = feature_extractor(UpperCAmelCase_ ,padding="max_length" ,return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
lowercase__ = feature_extractor(speech_inputs[0] ,return_tensors="np" ).input_features
lowercase__ = feature_extractor(np_speech_inputs[0] ,return_tensors="np" ).input_features
self.assertTrue(np.allclose(UpperCAmelCase_ ,UpperCAmelCase_ ,atol=1E-3 ) )
# Test batched
lowercase__ = feature_extractor(UpperCAmelCase_ ,return_tensors="np" ).input_features
lowercase__ = feature_extractor(UpperCAmelCase_ ,return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase_ ,UpperCAmelCase_ ):
self.assertTrue(np.allclose(UpperCAmelCase_ ,UpperCAmelCase_ ,atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowercase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowercase__ = np.asarray(UpperCAmelCase_ )
lowercase__ = feature_extractor(UpperCAmelCase_ ,return_tensors="np" ).input_features
lowercase__ = feature_extractor(UpperCAmelCase_ ,return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase_ ,UpperCAmelCase_ ):
self.assertTrue(np.allclose(UpperCAmelCase_ ,UpperCAmelCase_ ,atol=1E-3 ) )
# Test truncation required
lowercase__ = [floats_list((1, x) )[0] for x in range(200 ,(feature_extractor.n_samples + 500) ,200 )]
lowercase__ = [np.asarray(UpperCAmelCase_ ) for speech_input in speech_inputs]
lowercase__ = [x[: feature_extractor.n_samples] for x in speech_inputs]
lowercase__ = [np.asarray(UpperCAmelCase_ ) for speech_input in speech_inputs_truncated]
lowercase__ = feature_extractor(UpperCAmelCase_ ,return_tensors="np" ).input_features
lowercase__ = feature_extractor(UpperCAmelCase_ ,return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase_ ,UpperCAmelCase_ ):
self.assertTrue(np.allclose(UpperCAmelCase_ ,UpperCAmelCase_ ,atol=1E-3 ) )
def _a ( self ) -> Union[str, Any]:
import torch
lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase__ = np.random.rand(100 ,32 ).astype(np.floataa )
lowercase__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowercase__ = feature_extractor.pad([{"input_features": inputs}] ,return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
lowercase__ = feature_extractor.pad([{"input_features": inputs}] ,return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def _a ( self ,UpperCAmelCase_ ) -> List[Any]:
lowercase__ = load_dataset("hf-internal-testing/librispeech_asr_dummy" ,"clean" ,split="validation" )
# automatic decoding with librispeech
lowercase__ = ds.sort("id" ).select(range(UpperCAmelCase_ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def _a ( self ) -> Union[str, Any]:
# fmt: off
lowercase__ = torch.tensor(
[
0.11_93, -0.09_46, -0.10_98, -0.01_96, 0.02_25, -0.06_90, -0.17_36, 0.09_51,
0.09_71, -0.08_17, -0.07_02, 0.01_62, 0.02_60, 0.00_17, -0.01_92, -0.16_78,
0.07_09, -0.18_67, -0.06_55, -0.02_74, -0.02_34, -0.18_84, -0.05_16, -0.05_54,
-0.02_74, -0.14_25, -0.14_23, 0.08_37, 0.03_77, -0.08_54
] )
# fmt: on
lowercase__ = self._load_datasamples(1 )
lowercase__ = WhisperFeatureExtractor()
lowercase__ = feature_extractor(UpperCAmelCase_ ,return_tensors="pt" ).input_features
self.assertEqual(input_features.shape ,(1, 80, 3_000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] ,UpperCAmelCase_ ,atol=1E-4 ) )
def _a ( self ) -> int:
lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase__ = self._load_datasamples(1 )[0]
lowercase__ = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue
lowercase__ = feat_extract.zero_mean_unit_var_norm([audio] ,attention_mask=UpperCAmelCase_ )[0]
self.assertTrue(np.all(np.mean(UpperCAmelCase_ ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(UpperCAmelCase_ ) - 1 ) < 1E-3 ) )
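
# Hedged minimal usage of the extractor under test: raw 16 kHz audio in,
# (batch, 80, 3000) log-mel features out (Whisper pads/trims to 30 s).
import numpy as np
from transformers import WhisperFeatureExtractor

extractor = WhisperFeatureExtractor()
audio = np.random.randn(16_000).astype(np.float32)  # one second of noise
features = extractor(audio, sampling_rate=16_000, return_tensors="np").input_features
print(features.shape)  # (1, 80, 3000)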
| 539 |
'''simple docstring'''
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setup(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 539 | 1 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """Descriptor that caches the result of the first call to fget."""

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def __lowerCAmelCase ( _UpperCamelCase : Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f"""invalid truth value {val!r}""" )
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] ) -> List[Any]:
'''simple docstring'''
if is_torch_fx_proxy(_UpperCamelCase ):
return True
if is_torch_available():
import torch
if isinstance(_UpperCamelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(_UpperCamelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(_UpperCamelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(_UpperCamelCase , np.ndarray )
def __lowerCAmelCase ( _UpperCamelCase : Any ) -> Optional[Any]:
'''simple docstring'''
return isinstance(_UpperCamelCase , np.ndarray )
def __lowerCAmelCase ( _UpperCamelCase : int ) -> List[Any]:
'''simple docstring'''
return _is_numpy(_UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] ) -> List[Any]:
'''simple docstring'''
import torch
return isinstance(_UpperCamelCase , torch.Tensor )
def __lowerCAmelCase ( _UpperCamelCase : List[Any] ) -> str:
'''simple docstring'''
return False if not is_torch_available() else _is_torch(_UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : List[str] ) -> str:
'''simple docstring'''
import torch
return isinstance(_UpperCamelCase , torch.device )
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] ) -> List[Any]:
'''simple docstring'''
return False if not is_torch_available() else _is_torch_device(_UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] ) -> str:
'''simple docstring'''
import torch
if isinstance(_UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = getattr(_UpperCamelCase , _UpperCamelCase )
else:
return False
return isinstance(_UpperCamelCase , torch.dtype )
def __lowerCAmelCase ( _UpperCamelCase : int ) -> Dict:
'''simple docstring'''
return False if not is_torch_available() else _is_torch_dtype(_UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
import tensorflow as tf
return isinstance(_UpperCamelCase , tf.Tensor )
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] ) -> Tuple:
'''simple docstring'''
return False if not is_tf_available() else _is_tensorflow(_UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(_UpperCamelCase , 'is_symbolic_tensor' ):
return tf.is_symbolic_tensor(_UpperCamelCase )
return type(_UpperCamelCase ) == tf.Tensor
def __lowerCAmelCase ( _UpperCamelCase : List[Any] ) -> int:
'''simple docstring'''
return False if not is_tf_available() else _is_tf_symbolic_tensor(_UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : int ) -> str:
'''simple docstring'''
import jax.numpy as jnp # noqa: F811
return isinstance(_UpperCamelCase , jnp.ndarray )
def __lowerCAmelCase ( _UpperCamelCase : int ) -> Any:
'''simple docstring'''
return False if not is_flax_available() else _is_jax(_UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : List[str] ) -> Tuple:
'''simple docstring'''
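    # Convert a TensorFlow tensor, PyTorch tensor, NumPy array or Python list to a Python list.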
if isinstance(_UpperCamelCase , (dict, UserDict) ):
        return {k: to_py_obj(v ) for k, v in obj.items()}
    elif isinstance(_UpperCamelCase , (list, tuple) ):
        return [to_py_obj(o ) for o in obj]
elif is_tf_tensor(_UpperCamelCase ):
return obj.numpy().tolist()
elif is_torch_tensor(_UpperCamelCase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(_UpperCamelCase ):
return np.asarray(_UpperCamelCase ).tolist()
elif isinstance(_UpperCamelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def __lowerCAmelCase ( _UpperCamelCase : Tuple ) -> List[Any]:
'''simple docstring'''
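    # Convert a TensorFlow tensor, PyTorch tensor, NumPy array or Python list to a NumPy array.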
if isinstance(_UpperCamelCase , (dict, UserDict) ):
        return {k: to_numpy(v ) for k, v in obj.items()}
elif isinstance(_UpperCamelCase , (list, tuple) ):
return np.array(_UpperCamelCase )
elif is_tf_tensor(_UpperCamelCase ):
return obj.numpy()
elif is_torch_tensor(_UpperCamelCase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(_UpperCamelCase ):
return np.asarray(_UpperCamelCase )
else:
return obj
class UpperCamelCase ( UpperCAmelCase__ ):
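    # ModelOutput-style base class: an ordered dict whose non-None dataclass fields double as attributes.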
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = fields(self )
# Safety and consistency checks
if not len(_SCREAMING_SNAKE_CASE ):
raise ValueError(F"""{self.__class__.__name__} has no fields.""" )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F"""{self.__class__.__name__} should not have more than one required field.""" )
SCREAMING_SNAKE_CASE = getattr(self , class_fields[0].name )
SCREAMING_SNAKE_CASE = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(_SCREAMING_SNAKE_CASE ):
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = first_field.items()
SCREAMING_SNAKE_CASE = True
else:
try:
SCREAMING_SNAKE_CASE = iter(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = True
except TypeError:
SCREAMING_SNAKE_CASE = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(_SCREAMING_SNAKE_CASE ):
if (
not isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) )
or not len(_SCREAMING_SNAKE_CASE ) == 2
or not isinstance(element[0] , _SCREAMING_SNAKE_CASE )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
SCREAMING_SNAKE_CASE = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
SCREAMING_SNAKE_CASE = element[1]
elif first_field is not None:
SCREAMING_SNAKE_CASE = first_field
else:
for field in class_fields:
SCREAMING_SNAKE_CASE = getattr(self , field.name )
if v is not None:
SCREAMING_SNAKE_CASE = v
def __delitem__( self : Union[str, Any] , *snake_case__ : List[str] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
raise Exception(F"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )
def UpperCamelCase ( self : int , *snake_case__ : List[Any] , **snake_case__ : Any ):
"""simple docstring"""
raise Exception(F"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )
def UpperCamelCase ( self : Optional[Any] , *snake_case__ : int , **snake_case__ : int ):
"""simple docstring"""
raise Exception(F"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )
def UpperCamelCase ( self : List[str] , *snake_case__ : Optional[int] , **snake_case__ : Optional[int] ):
"""simple docstring"""
raise Exception(F"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )
def __getitem__( self : Tuple , snake_case__ : Optional[int] ):
"""simple docstring"""
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ):
"""simple docstring"""
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
super().__setattr__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __setitem__( self : List[str] , snake_case__ : Dict , snake_case__ : Tuple ):
"""simple docstring"""
super().__setitem__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return tuple(self[k] for k in self.keys() )
class UpperCamelCase ( UpperCAmelCase__ , UpperCAmelCase__ ):
@classmethod
def UpperCamelCase ( cls : Optional[int] , snake_case__ : Any ):
"""simple docstring"""
        raise ValueError(
            F"""{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}""" )
class UpperCamelCase ( UpperCAmelCase__ ):
__UpperCamelCase ='longest'
__UpperCamelCase ='max_length'
__UpperCamelCase ='do_not_pad'
class UpperCamelCase ( UpperCAmelCase__ ):
__UpperCamelCase ='pt'
__UpperCamelCase ='tf'
__UpperCamelCase ='np'
__UpperCamelCase ='jax'
class UpperCamelCase :
def __init__( self : int , snake_case__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = context_managers
SCREAMING_SNAKE_CASE = ExitStack()
def __enter__( self : Any ):
"""simple docstring"""
for context_manager in self.context_managers:
self.stack.enter_context(_SCREAMING_SNAKE_CASE )
def __exit__( self : Union[str, Any] , *snake_case__ : List[str] , **snake_case__ : Dict ):
"""simple docstring"""
self.stack.__exit__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( _UpperCamelCase : Dict ) -> str:
'''simple docstring'''
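    # Check whether a model can return loss, i.e. whether its signature has a return_loss argument defaulting to True.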
SCREAMING_SNAKE_CASE = infer_framework(_UpperCamelCase )
if framework == "tf":
SCREAMING_SNAKE_CASE = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
SCREAMING_SNAKE_CASE = inspect.signature(model_class.forward ) # PyTorch models
else:
SCREAMING_SNAKE_CASE = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def __lowerCAmelCase ( _UpperCamelCase : str ) -> str:
'''simple docstring'''
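    # Find the label argument names used by a model class, based on its forward/call signature.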
SCREAMING_SNAKE_CASE = model_class.__name__
SCREAMING_SNAKE_CASE = infer_framework(_UpperCamelCase )
if framework == "tf":
SCREAMING_SNAKE_CASE = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
SCREAMING_SNAKE_CASE = inspect.signature(model_class.forward ) # PyTorch models
else:
SCREAMING_SNAKE_CASE = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def __lowerCAmelCase ( _UpperCamelCase : MutableMapping , _UpperCamelCase : str = "" , _UpperCamelCase : str = "." ) -> Dict:
'''simple docstring'''
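    # Flatten a nested dict into a single-level dict, joining nested keys with the given delimiter.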
def _flatten_dict(_UpperCamelCase : int , _UpperCamelCase : List[str]="" , _UpperCamelCase : Union[str, Any]="." ):
for k, v in d.items():
            SCREAMING_SNAKE_CASE = str(parent_key ) + delimiter + str(k ) if parent_key else k
if v and isinstance(_UpperCamelCase , _UpperCamelCase ):
yield from flatten_dict(_UpperCamelCase , _UpperCamelCase , delimiter=_UpperCamelCase ).items()
else:
yield key, v
return dict(_flatten_dict(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
@contextmanager
def __lowerCAmelCase ( _UpperCamelCase : Any , _UpperCamelCase : bool = False ) -> str:
'''simple docstring'''
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : List[Any]=None ) -> Any:
'''simple docstring'''
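    # Framework-agnostic transpose: dispatch to numpy/torch/tf/jax depending on the array type.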
if is_numpy_array(_UpperCamelCase ):
return np.transpose(_UpperCamelCase , axes=_UpperCamelCase )
elif is_torch_tensor(_UpperCamelCase ):
return array.T if axes is None else array.permute(*_UpperCamelCase )
elif is_tf_tensor(_UpperCamelCase ):
import tensorflow as tf
return tf.transpose(_UpperCamelCase , perm=_UpperCamelCase )
elif is_jax_tensor(_UpperCamelCase ):
return jnp.transpose(_UpperCamelCase , axes=_UpperCamelCase )
else:
raise ValueError(f"""Type not supported for transpose: {type(_UpperCamelCase )}.""" )
def __lowerCAmelCase ( _UpperCamelCase : Any , _UpperCamelCase : Optional[Any] ) -> Dict:
'''simple docstring'''
if is_numpy_array(_UpperCamelCase ):
return np.reshape(_UpperCamelCase , _UpperCamelCase )
elif is_torch_tensor(_UpperCamelCase ):
return array.reshape(*_UpperCamelCase )
elif is_tf_tensor(_UpperCamelCase ):
import tensorflow as tf
return tf.reshape(_UpperCamelCase , _UpperCamelCase )
elif is_jax_tensor(_UpperCamelCase ):
return jnp.reshape(_UpperCamelCase , _UpperCamelCase )
else:
raise ValueError(f"""Type not supported for reshape: {type(_UpperCamelCase )}.""" )
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : List[Any]=None ) -> Any:
'''simple docstring'''
if is_numpy_array(_UpperCamelCase ):
return np.squeeze(_UpperCamelCase , axis=_UpperCamelCase )
elif is_torch_tensor(_UpperCamelCase ):
return array.squeeze() if axis is None else array.squeeze(dim=_UpperCamelCase )
elif is_tf_tensor(_UpperCamelCase ):
import tensorflow as tf
return tf.squeeze(_UpperCamelCase , axis=_UpperCamelCase )
elif is_jax_tensor(_UpperCamelCase ):
return jnp.squeeze(_UpperCamelCase , axis=_UpperCamelCase )
else:
raise ValueError(f"""Type not supported for squeeze: {type(_UpperCamelCase )}.""" )
def __lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] ) -> int:
'''simple docstring'''
if is_numpy_array(_UpperCamelCase ):
return np.expand_dims(_UpperCamelCase , _UpperCamelCase )
elif is_torch_tensor(_UpperCamelCase ):
return array.unsqueeze(dim=_UpperCamelCase )
elif is_tf_tensor(_UpperCamelCase ):
import tensorflow as tf
return tf.expand_dims(_UpperCamelCase , axis=_UpperCamelCase )
elif is_jax_tensor(_UpperCamelCase ):
return jnp.expand_dims(_UpperCamelCase , axis=_UpperCamelCase )
else:
raise ValueError(f"""Type not supported for expand_dims: {type(_UpperCamelCase )}.""" )
def __lowerCAmelCase ( _UpperCamelCase : Any ) -> str:
'''simple docstring'''
if is_numpy_array(_UpperCamelCase ):
return np.size(_UpperCamelCase )
elif is_torch_tensor(_UpperCamelCase ):
return array.numel()
elif is_tf_tensor(_UpperCamelCase ):
import tensorflow as tf
return tf.size(_UpperCamelCase )
elif is_jax_tensor(_UpperCamelCase ):
return array.size
else:
raise ValueError(f"""Type not supported for expand_dims: {type(_UpperCamelCase )}.""" )
def __lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
for key, value in auto_map.items():
if isinstance(_UpperCamelCase , (tuple, list) ):
SCREAMING_SNAKE_CASE = [f"""{repo_id}--{v}""" if (v is not None and """--""" not in v) else v for v in value]
elif value is not None and "--" not in value:
SCREAMING_SNAKE_CASE = f"""{repo_id}--{value}"""
return auto_map
def __lowerCAmelCase ( _UpperCamelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
for base_class in inspect.getmro(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = base_class.__module__
SCREAMING_SNAKE_CASE = base_class.__name__
if module.startswith('tensorflow' ) or module.startswith('keras' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('torch' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('flax' ) or module.startswith('jax' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f"""Could not infer framework from class {model_class}.""" )
| 439 |
"""simple docstring"""
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
A: int = logging.get_logger(__name__)
A: List[str] = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def _snake_case ( UpperCamelCase : Optional[int] ):
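    # Map a ParlAI checkpoint key to the corresponding Hugging Face Blenderbot state-dict key.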
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
        UpperCAmelCase : Tuple = k.replace(parlai_name , hf_name )
if k.startswith("""encoder""" ):
UpperCAmelCase : str = k.replace(""".attn""" , """.self_attn""" )
UpperCAmelCase : str = k.replace("""norm1""" , """self_attn_layer_norm""" )
UpperCAmelCase : Optional[int] = k.replace("""norm2""" , """final_layer_norm""" )
elif k.startswith("""decoder""" ):
UpperCAmelCase : Optional[int] = k.replace("""norm1""" , """self_attn_layer_norm""" )
UpperCAmelCase : Dict = k.replace("""norm2""" , """encoder_attn_layer_norm""" )
UpperCAmelCase : Tuple = k.replace("""norm3""" , """final_layer_norm""" )
return k
def _snake_case ( UpperCamelCase : List[Any] ):
UpperCAmelCase : Any = [
"""model.encoder.layernorm_embedding.weight""",
"""model.encoder.layernorm_embedding.bias""",
"""model.decoder.layernorm_embedding.weight""",
"""model.decoder.layernorm_embedding.bias""",
]
for k in keys:
        UpperCAmelCase : List[Any] = sd.pop(k )
UpperCAmelCase : Dict = k.replace("""layernorm_embedding""" , """layer_norm""" )
assert new_k not in sd
UpperCAmelCase : List[str] = v
A: Optional[Any] = ["START"]
@torch.no_grad()
def _snake_case ( UpperCamelCase : Optional[int] , UpperCamelCase : str , UpperCamelCase : Tuple ):
UpperCAmelCase : str = torch.load(UpperCamelCase , map_location="""cpu""" )
UpperCAmelCase : str = model["""model"""]
UpperCAmelCase : str = BlenderbotConfig.from_json_file(UpperCamelCase )
UpperCAmelCase : str = BlenderbotForConditionalGeneration(UpperCamelCase )
UpperCAmelCase : List[str] = m.model.state_dict().keys()
UpperCAmelCase : Dict = []
UpperCAmelCase : str = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
UpperCAmelCase : Union[str, Any] = rename_state_dict_key(UpperCamelCase )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
UpperCAmelCase : Optional[Any] = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(UpperCamelCase )
m.model.load_state_dict(UpperCamelCase , strict=UpperCamelCase )
m.half()
m.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
A: Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
A: Dict = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 160 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(snake_case__ , "tf_padding" ) )
self.parent.assertTrue(hasattr(snake_case__ , "depth_multiplier" ) )
class UpperCamelCase :
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=3 , snake_case__=32 , snake_case__=0.25 , snake_case__=8 , snake_case__=True , snake_case__=1024 , snake_case__=32 , snake_case__="relu6" , snake_case__=0.1 , snake_case__=0.02 , snake_case__=True , snake_case__=True , snake_case__=10 , snake_case__=None , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = parent
_SCREAMING_SNAKE_CASE : str = batch_size
_SCREAMING_SNAKE_CASE : Dict = num_channels
_SCREAMING_SNAKE_CASE : str = image_size
_SCREAMING_SNAKE_CASE : Optional[Any] = depth_multiplier
_SCREAMING_SNAKE_CASE : Optional[Any] = min_depth
_SCREAMING_SNAKE_CASE : Optional[Any] = tf_padding
_SCREAMING_SNAKE_CASE : Optional[int] = int(last_hidden_size * depth_multiplier )
_SCREAMING_SNAKE_CASE : Union[str, Any] = output_stride
_SCREAMING_SNAKE_CASE : str = hidden_act
_SCREAMING_SNAKE_CASE : str = classifier_dropout_prob
_SCREAMING_SNAKE_CASE : str = use_labels
_SCREAMING_SNAKE_CASE : Tuple = is_training
_SCREAMING_SNAKE_CASE : List[Any] = num_labels
_SCREAMING_SNAKE_CASE : Optional[int] = initializer_range
_SCREAMING_SNAKE_CASE : List[Any] = scope
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE : List[str] = None
_SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_labels:
_SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size] , self.num_labels )
_SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_SCREAMING_SNAKE_CASE : int = self.get_config()
return config, pixel_values, labels, pixel_labels
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = MobileNetVaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
_SCREAMING_SNAKE_CASE : Dict = model(snake_case__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = self.num_labels
_SCREAMING_SNAKE_CASE : int = MobileNetVaForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
_SCREAMING_SNAKE_CASE : int = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = config_and_inputs
_SCREAMING_SNAKE_CASE : Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
A__ = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
A__ = (
{"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
A__ = False
A__ = False
A__ = False
A__ = False
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = MobileNetVaModelTester(self )
_SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV1 does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="MobileNetV1 does not support input and output embeddings" )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="MobileNetV1 does not output attentions" )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : str = model_class(snake_case__ )
_SCREAMING_SNAKE_CASE : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE : Dict = [*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
_SCREAMING_SNAKE_CASE : List[str] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
_SCREAMING_SNAKE_CASE : Tuple = outputs.hidden_states
_SCREAMING_SNAKE_CASE : Optional[int] = 26
self.assertEqual(len(snake_case__ ) , snake_case__ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : int = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE : Optional[int] = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
@slow
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE : Any = MobileNetVaModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def _lowerCAmelCase ( ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
@cached_property
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224" ) if is_vision_available() else None
)
@slow
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224" ).to(snake_case__ )
_SCREAMING_SNAKE_CASE : int = self.default_image_processor
_SCREAMING_SNAKE_CASE : Dict = prepare_img()
_SCREAMING_SNAKE_CASE : str = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE : Optional[int] = model(**snake_case__ )
# verify the logits
_SCREAMING_SNAKE_CASE : Dict = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , snake_case__ )
_SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([-4.1_739, -1.1_233, 3.1_205] ).to(snake_case__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) )
| 295 |
"""simple docstring"""
import operator as op
def _lowerCAmelCase ( lowerCamelCase__ : Tuple ) -> List[str]:
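    # Evaluate a space-separated postfix (RPN) expression, printing each push/pop as a table row.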
_SCREAMING_SNAKE_CASE : Optional[int] = []
    _SCREAMING_SNAKE_CASE : str = lambda x, y : int(x / y )  # noqa: E731 integer division operation
_SCREAMING_SNAKE_CASE : Any = {
"^": op.pow,
"*": op.mul,
"/": div,
"+": op.add,
"-": op.sub,
} # operators & their respective operation
# print table header
print("Symbol".center(8 ), "Action".center(1_2 ), "Stack", sep=" | " )
print("-" * (3_0 + len(lowerCamelCase__ )) )
for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x )  # append x to stack
            # output in tabular format
            print(x.rjust(8 ), ("push(" + x + ")").ljust(1_2 ), ",".join(stack ), sep=" | " )
else:
            _SCREAMING_SNAKE_CASE : Dict = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8 ), ("pop(" + b + ")").ljust(1_2 ), ",".join(stack ), sep=" | " )
            _SCREAMING_SNAKE_CASE : Any = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8 ), ("pop(" + a + ")").ljust(1_2 ), ",".join(stack ), sep=" | " )
            stack.append(
                str(opr[x](int(a ), int(b ) ) ) )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8 ), ("push(" + a + x + b + ")").ljust(1_2 ), ",".join(stack ), sep=" | ", )
return int(stack[0] )
if __name__ == "__main__":
lowercase_ : int = input('''\n\nEnter a Postfix Equation (space separated) = ''').split(''' ''')
print('''\n\tResult = ''', solve(Postfix))
| 295 | 1 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def a__ ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
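    # Doolittle LU decomposition: factor a square matrix into lower- and upper-triangular matrices.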
lowerCAmelCase , lowerCAmelCase : Any = np.shape(__snake_case )
if rows != columns:
lowerCAmelCase : Optional[Any] = (
"\'table\' has to be of square shaped array but got a "
f"""{rows}x{columns} array:\n{table}"""
)
raise ValueError(__snake_case )
lowerCAmelCase : Optional[Any] = np.zeros((rows, columns) )
lowerCAmelCase : Any = np.zeros((rows, columns) )
    for i in range(columns ):
        for j in range(i ):
            lowerCAmelCase : Optional[int] = sum(lower[i][k] * upper[k][j] for k in range(j ) )
if upper[j][j] == 0:
raise ArithmeticError("No LU decomposition exists" )
lowerCAmelCase : Dict = (table[i][j] - total) / upper[j][j]
lowerCAmelCase : Union[str, Any] = 1
        for j in range(i , columns ):
            lowerCAmelCase : str = sum(lower[i][k] * upper[k][j] for k in range(i ) )
lowerCAmelCase : str = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
| 645 |
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class _snake_case ( lowerCamelCase ):
"""simple docstring"""
def __init__( self , a , a ) -> List[str]:
"""simple docstring"""
super().__init__()
self.register_modules(unet=a , scheduler=a )
@torch.no_grad()
def __call__( self , a = 1 , a = None , a = 5_0 , a = "pil" , a = True , **a , ) -> Union[ImagePipelineOutput, Tuple]:
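        # Start from Gaussian noise and iteratively denoise it with the UNet under the configured scheduler.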
"""simple docstring"""
_A = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=a , )
_A = image.to(self.device )
# set step values
self.scheduler.set_timesteps(a )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
_A = self.unet(a , a ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_A = self.scheduler.step(a , a , a ).prev_sample
_A = (image / 2 + 0.5).clamp(0 , 1 )
_A = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            _A = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 317 | 0 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
lowerCAmelCase__ = {
"""/attention/""": """/0/SelfAttention/""",
"""/self_attention/""": """/0/SelfAttention/""",
"""/encoder_decoder_attention/""": """/1/EncDecAttention/""",
"""value""": """v""",
"""query""": """q""",
"""key""": """k""",
"""out""": """o""",
"""pre_self_attention_layer_norm""": """0/layer_norm""",
"""pre_cross_attention_layer_norm""": """1/layer_norm""",
"""pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong
"""token_embedder""": """shared""",
"""encoder_norm""": """final_layer_norm""",
"""decoder_norm""": """final_layer_norm""",
"""relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""",
"""router/router_weights/w/""": """router/classifier/""",
"""roer/roer_weights/w/""": """router/classifier/""",
"""logits_dense""": """lm_head""",
}
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
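    # Rewrite T5X checkpoint key names into the Switch Transformers (HF) naming scheme.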
_UpperCamelCase : Union[str, Any] = list(s_dict.keys() )
for key in keys:
_UpperCamelCase : Optional[int] = R'.*/layers_(\d+)'
_UpperCamelCase : int = key
if re.match(UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : List[str] = re.sub(R'layers_(\d+)' , R'block/\1/layer' , UpperCAmelCase_ )
_UpperCamelCase : Tuple = R'(encoder|decoder)\/'
if re.match(UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : Optional[Any] = re.match(UpperCAmelCase_ , UpperCAmelCase_ ).groups()
if groups[0] == "encoder":
_UpperCamelCase : Tuple = re.sub(R'/mlp/' , R'/1/mlp/' , UpperCAmelCase_ )
_UpperCamelCase : Optional[int] = re.sub(R'/pre_mlp_layer_norm/' , R'/1/layer_norm/' , UpperCAmelCase_ )
elif groups[0] == "decoder":
_UpperCamelCase : int = re.sub(R'/mlp/' , R'/2/mlp/' , UpperCAmelCase_ )
_UpperCamelCase : Union[str, Any] = re.sub(R'/pre_mlp_layer_norm/' , R'/2/layer_norm/' , UpperCAmelCase_ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
_UpperCamelCase : Optional[int] = new_key.replace(UpperCAmelCase_ , UpperCAmelCase_ )
print(F'''{key} -> {new_key}''' )
_UpperCamelCase : Union[str, Any] = s_dict.pop(UpperCAmelCase_ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_UpperCamelCase : Optional[Any] = s_dict[
'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_UpperCamelCase : Any = s_dict[
'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
_UpperCamelCase : Any = s_dict[key].shape[0]
_UpperCamelCase : Union[str, Any] = s_dict[key]
for idx in range(UpperCAmelCase_ ):
                s_dict[key.replace('expert/' , F'''experts/expert_{idx}/''' )] = expert_weihts[idx]
                print(F'''{key} -> {key.replace("expert/" , F"experts/expert_{idx}/" )}''' )
s_dict.pop(UpperCAmelCase_ )
return s_dict
lowerCAmelCase__ = {
"""NUM_ENCODER_LAYERS""": """num_layers""",
"""NUM_DECODER_LAYERS""": """num_decoder_layers""",
"""NUM_HEADS""": """num_heads""",
"""HEAD_DIM""": """d_kv""",
"""EMBED_DIM""": """d_model""",
"""MLP_DIM""": """d_ff""",
"""NUM_SELECTED_EXPERTS""": """num_selected_experts""",
"""NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
"""NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
"""dense.MlpBlock.activations""": """feed_forward_proj""",
}
def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict ) -> List[str]:
'''simple docstring'''
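    # Parse a gin config file into a SwitchTransformersConfig via GIN_TO_CONFIG_MAPPING.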
import regex as re
with open(UpperCAmelCase_ , 'r' ) as f:
_UpperCamelCase : str = f.read()
_UpperCamelCase : Optional[int] = re.findall(R'(.*) = ([0-9.]*)' , UpperCAmelCase_ )
_UpperCamelCase : int = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
_UpperCamelCase : List[Any] = float(UpperCAmelCase_ ) if '.' in value else int(UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = re.findall(R'(.*activations) = \(\'(.*)\',\)' , UpperCAmelCase_ )[0]
_UpperCamelCase : Union[str, Any] = str(activation[1] )
_UpperCamelCase : Any = num_experts
_UpperCamelCase : Optional[Any] = SwitchTransformersConfig(**UpperCAmelCase_ )
return config
def lowerCamelCase_ ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Dict="./" , UpperCAmelCase_ : Optional[int]=8 ) -> Optional[int]:
'''simple docstring'''
print(F'''Loading flax weights from : {flax_checkpoint_path}''' )
_UpperCamelCase : Tuple = checkpoints.load_tax_checkpoint(UpperCAmelCase_ )
if gin_file is not None:
_UpperCamelCase : str = convert_gin_to_config(UpperCAmelCase_ , UpperCAmelCase_ )
else:
_UpperCamelCase : Optional[Any] = SwitchTransformersConfig.from_pretrained(UpperCAmelCase_ )
_UpperCamelCase : Union[str, Any] = SwitchTransformersForConditionalGeneration(UpperCAmelCase_ )
_UpperCamelCase : Dict = flax_params['target']
_UpperCamelCase : str = flatten_dict(UpperCAmelCase_ , sep='/' )
_UpperCamelCase : Dict = rename_keys(UpperCAmelCase_ )
_UpperCamelCase : Optional[int] = unflatten_dict(UpperCAmelCase_ , sep='/' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(UpperCAmelCase_ , UpperCAmelCase_ )
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"""
""" model architecture. If not provided, a `gin_file` has to be provided."""
),
)
parser.add_argument(
"""--gin_file""",
default=None,
type=str,
required=False,
help="""Path to the gin config file. If not provided, a `config_file` has to be passed """,
)
parser.add_argument(
"""--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model."""
)
parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""")
lowerCAmelCase__ = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 701 |
import logging
from transformers.configuration_utils import PretrainedConfig
lowerCAmelCase__ = logging.getLogger(__name__)
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "masked_bert"
def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="topK" , __snake_case="constant" , __snake_case=0.0 , **__snake_case , ):
super().__init__(pad_token_id=__snake_case , **__snake_case)
_UpperCamelCase : List[Any] = vocab_size
_UpperCamelCase : Union[str, Any] = hidden_size
_UpperCamelCase : Optional[int] = num_hidden_layers
_UpperCamelCase : Any = num_attention_heads
_UpperCamelCase : int = hidden_act
_UpperCamelCase : str = intermediate_size
_UpperCamelCase : str = hidden_dropout_prob
_UpperCamelCase : Any = attention_probs_dropout_prob
_UpperCamelCase : Tuple = max_position_embeddings
_UpperCamelCase : Dict = type_vocab_size
_UpperCamelCase : str = initializer_range
_UpperCamelCase : List[Any] = layer_norm_eps
_UpperCamelCase : Tuple = pruning_method
_UpperCamelCase : Tuple = mask_init
_UpperCamelCase : Dict = mask_scale
| 648 | 0 |
from __future__ import annotations
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> set[str]:
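    # Iterative depth-first search: return the set of vertices reachable from start.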
    UpperCamelCase_ ,UpperCamelCase_: Dict = set(start ), [start]
    while stack:
        UpperCamelCase_: Any = stack.pop()
        explored.add(v )
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v] ):
            if adj not in explored:
                stack.append(adj )
return explored
A_ : Dict = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, 'A'))
| 57 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : int =logging.getLogger(__name__)
@dataclass(frozen=__a )
class A_ :
_A :str
_A :str
_A :Optional[str] = None
_A :Optional[str] = None
_A :Optional[str] = None
@dataclass(frozen=__a )
class A_ :
_A :List[int]
_A :Optional[List[int]] = None
_A :Optional[List[int]] = None
_A :Optional[Union[int, float]] = None
_A :Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class A_ ( __a ):
_A :List[InputFeatures]
def __init__( self : List[Any] , snake_case__ : str , snake_case__ : PreTrainedTokenizer , snake_case__ : str , snake_case__ : Optional[int] = None , snake_case__ : Optional[int]=False , snake_case__ : bool = False , ):
lowercase = hans_processors[task]()
lowercase = os.path.join(
snake_case__ , """cached_{}_{}_{}_{}""".format(
"""dev""" if evaluate else """train""" , tokenizer.__class__.__name__ , str(snake_case__ ) , snake_case__ , ) , )
lowercase = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowercase , lowercase = label_list[2], label_list[1]
lowercase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowercase = cached_features_file + """.lock"""
with FileLock(snake_case__ ):
if os.path.exists(snake_case__ ) and not overwrite_cache:
logger.info(F"""Loading features from cached file {cached_features_file}""" )
lowercase = torch.load(snake_case__ )
else:
logger.info(F"""Creating features from dataset file at {data_dir}""" )
lowercase = (
processor.get_dev_examples(snake_case__ ) if evaluate else processor.get_train_examples(snake_case__ )
)
logger.info("""Training examples: %s""" , len(snake_case__ ) )
lowercase = hans_convert_examples_to_features(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
logger.info("""Saving features into cached file %s""" , snake_case__ )
torch.save(self.features , snake_case__ )
def __len__( self : List[str] ):
return len(self.features )
def __getitem__( self : int , snake_case__ : int ):
return self.features[i]
def SCREAMING_SNAKE_CASE__ ( self : int ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class A_ :
_A :List[InputFeatures]
def __init__( self : Union[str, Any] , snake_case__ : str , snake_case__ : PreTrainedTokenizer , snake_case__ : str , snake_case__ : Optional[int] = 1_28 , snake_case__ : Union[str, Any]=False , snake_case__ : bool = False , ):
lowercase = hans_processors[task]()
lowercase = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowercase , lowercase = label_list[2], label_list[1]
lowercase = label_list
lowercase = processor.get_dev_examples(snake_case__ ) if evaluate else processor.get_train_examples(snake_case__ )
lowercase = hans_convert_examples_to_features(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="""convert examples to features""" ):
if ex_index % 1_00_00 == 0:
logger.info("""Writing example %d of %d""" % (ex_index, len(snake_case__ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
lowercase = tf.data.Dataset.from_generator(
snake_case__ , (
{
"""example_id""": tf.intaa,
"""input_ids""": tf.intaa,
"""attention_mask""": tf.intaa,
"""token_type_ids""": tf.intaa,
},
tf.intaa,
) , (
{
"""example_id""": tf.TensorShape([] ),
"""input_ids""": tf.TensorShape([None, None] ),
"""attention_mask""": tf.TensorShape([None, None] ),
"""token_type_ids""": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def SCREAMING_SNAKE_CASE__ ( self : str ):
return self.dataset
def __len__( self : int ):
return len(self.features )
def __getitem__( self : Optional[Any] , snake_case__ : Union[str, Any] ):
return self.features[i]
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
return self.label_list
class A_ ( __a ):
def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case__ : Optional[Any] ):
return self._create_examples(self._read_tsv(os.path.join(snake_case__ , """heuristics_train_set.txt""" ) ) , """train""" )
def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case__ : List[str] ):
return self._create_examples(self._read_tsv(os.path.join(snake_case__ , """heuristics_evaluation_set.txt""" ) ) , """dev""" )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
return ["contradiction", "entailment", "neutral"]
def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case__ : List[str] , snake_case__ : str ):
lowercase = []
for i, line in enumerate(snake_case__ ):
if i == 0:
continue
lowercase = """%s-%s""" % (set_type, line[0])
lowercase = line[5]
lowercase = line[6]
lowercase = line[7][2:] if line[7].startswith("""ex""" ) else line[7]
lowercase = line[0]
examples.append(InputExample(guid=snake_case__ , text_a=snake_case__ , text_b=snake_case__ , label=snake_case__ , pairID=snake_case__ ) )
return examples
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,):
lowercase = {label: i for i, label in enumerate(lowerCAmelCase__ )}
lowercase = []
for ex_index, example in tqdm.tqdm(enumerate(lowerCAmelCase__ ) ,desc="""convert examples to features""" ):
if ex_index % 10_000 == 0:
logger.info("""Writing example %d""" % (ex_index) )
lowercase = tokenizer(
example.text_a ,example.text_b ,add_special_tokens=lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="""max_length""" ,truncation=lowerCAmelCase__ ,return_overflowing_tokens=lowerCAmelCase__ ,)
lowercase = label_map[example.label] if example.label in label_map else 0
lowercase = int(example.pairID )
features.append(InputFeatures(**lowerCAmelCase__ ,label=lowerCAmelCase__ ,pairID=lowerCAmelCase__ ) )
for i, example in enumerate(examples[:5] ):
logger.info("""*** Example ***""" )
logger.info(f"""guid: {example}""" )
logger.info(f"""features: {features[i]}""" )
return features
__SCREAMING_SNAKE_CASE : Optional[int] ={
'''hans''': 3,
}
__SCREAMING_SNAKE_CASE : List[str] ={
'''hans''': HansProcessor,
}
| 428 | 0 |
'''simple docstring'''
def _a ( __lowerCAmelCase : int = 10_00 ):
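    # Project Euler-style search: maximize the product a*b*c over Pythagorean triplets with a+b+c == n.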
"""simple docstring"""
snake_case__ : Optional[Any] = -1
snake_case__ : str = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
snake_case__ : int = (n * n - 2 * a * n) // (2 * n - 2 * a)
snake_case__ : str = n - a - b
if c * c == (a * a + b * b):
snake_case__ : List[str] = a * b * c
if candidate >= product:
snake_case__ : Optional[int] = candidate
return product
if __name__ == "__main__":
print(f"""{solution() = }""")
| 502 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
lowerCAmelCase__ : Any = True
except (ImportError, AttributeError):
lowerCAmelCase__ : Dict = object
def _a ( *__lowerCAmelCase : List[str] , **__lowerCAmelCase : Tuple ):
"""simple docstring"""
pass
lowerCAmelCase__ : str = False
lowerCAmelCase__ : List[str] = logging.get_logger("""transformers-cli/serving""")
def _a ( __lowerCAmelCase : Namespace ):
"""simple docstring"""
snake_case__ : Union[str, Any] = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(__lowerCAmelCase , args.host , args.port , args.workers )
class a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = 42
class a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = 42
class a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = 42
class a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = 42
class a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
@staticmethod
def __magic_name__ ( snake_case_ : ArgumentParser ):
'''simple docstring'''
snake_case__ : Optional[Any] = parser.add_parser(
'''serve''' , help='''CLI tool to run inference requests through REST and GraphQL endpoints.''' )
serve_parser.add_argument(
'''--task''' , type=snake_case_ , choices=get_supported_tasks() , help='''The task to run the pipeline on''' , )
serve_parser.add_argument('''--host''' , type=snake_case_ , default='''localhost''' , help='''Interface the server will listen on.''' )
serve_parser.add_argument('''--port''' , type=snake_case_ , default=8_8_8_8 , help='''Port the serving will listen to.''' )
serve_parser.add_argument('''--workers''' , type=snake_case_ , default=1 , help='''Number of http workers''' )
serve_parser.add_argument('''--model''' , type=snake_case_ , help='''Model\'s name or path to stored model.''' )
serve_parser.add_argument('''--config''' , type=snake_case_ , help='''Model\'s config name or path to stored model.''' )
serve_parser.add_argument('''--tokenizer''' , type=snake_case_ , help='''Tokenizer name to use.''' )
serve_parser.add_argument(
'''--device''' , type=snake_case_ , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
serve_parser.set_defaults(func=snake_case_ )
def __init__( self : Union[str, Any] , snake_case_ : Pipeline , snake_case_ : str , snake_case_ : int , snake_case_ : int ):
'''simple docstring'''
snake_case__ : Any = pipeline
snake_case__ : Tuple = host
snake_case__ : Optional[Any] = port
snake_case__ : Tuple = workers
if not _serve_dependencies_installed:
raise RuntimeError(
'''Using serve command requires FastAPI and uvicorn. '''
'''Please install transformers with [serving]: pip install "transformers[serving]".'''
'''Or install FastAPI and uvicorn separately.''' )
else:
logger.info(F"""Serving model over {host}:{port}""" )
snake_case__ : str = FastAPI(
routes=[
APIRoute(
'''/''' , self.model_info , response_model=snake_case_ , response_class=snake_case_ , methods=['''GET'''] , ),
APIRoute(
'''/tokenize''' , self.tokenize , response_model=snake_case_ , response_class=snake_case_ , methods=['''POST'''] , ),
APIRoute(
'''/detokenize''' , self.detokenize , response_model=snake_case_ , response_class=snake_case_ , methods=['''POST'''] , ),
APIRoute(
'''/forward''' , self.forward , response_model=snake_case_ , response_class=snake_case_ , methods=['''POST'''] , ),
] , timeout=6_0_0 , )
def __magic_name__ ( self : str ):
'''simple docstring'''
run(self._app , host=self.host , port=self.port , workers=self.workers )
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def __magic_name__ ( self : List[str] , snake_case_ : str = Body(snake_case_ , embed=snake_case_ ) , snake_case_ : bool = Body(snake_case_ , embed=snake_case_ ) ):
'''simple docstring'''
try:
snake_case__ : Optional[Any] = self._pipeline.tokenizer.tokenize(snake_case_ )
if return_ids:
snake_case__ : Optional[int] = self._pipeline.tokenizer.convert_tokens_to_ids(snake_case_ )
return ServeTokenizeResult(tokens=snake_case_ , tokens_ids=snake_case_ )
else:
return ServeTokenizeResult(tokens=snake_case_ )
except Exception as e:
raise HTTPException(status_code=5_0_0 , detail={'''model''': '''''', '''error''': str(snake_case_ )} )
def __magic_name__ ( self : List[Any] , snake_case_ : List[int] = Body(snake_case_ , embed=snake_case_ ) , snake_case_ : bool = Body(snake_case_ , embed=snake_case_ ) , snake_case_ : bool = Body(snake_case_ , embed=snake_case_ ) , ):
'''simple docstring'''
try:
snake_case__ : Optional[int] = self._pipeline.tokenizer.decode(snake_case_ , snake_case_ , snake_case_ )
return ServeDeTokenizeResult(model='''''' , text=snake_case_ )
except Exception as e:
raise HTTPException(status_code=5_0_0 , detail={'''model''': '''''', '''error''': str(snake_case_ )} )
async def __magic_name__ ( self : Tuple , snake_case_ : List[str]=Body(snake_case_ , embed=snake_case_ ) ):
'''simple docstring'''
if len(snake_case_ ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
snake_case__ : Tuple = self._pipeline(snake_case_ )
return ServeForwardResult(output=snake_case_ )
except Exception as e:
raise HTTPException(5_0_0 , {'''error''': str(snake_case_ )} )
| 502 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : List[Any] = '''data2vec-text'''
def __init__( self , _lowercase=30_522 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3_072 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=2 , _lowercase=0.02 , _lowercase=1e-12 , _lowercase=1 , _lowercase=0 , _lowercase=2 , _lowercase="absolute" , _lowercase=True , _lowercase=None , **_lowercase , ):
"""simple docstring"""
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = hidden_act
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = position_embedding_type
_lowerCAmelCase = use_cache
_lowerCAmelCase = classifier_dropout
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _lowercase ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
_lowerCAmelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_lowerCAmelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 5 |
from collections.abc import Sequence
def UpperCamelCase ( snake_case__ = None):
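    # Kadane's algorithm: the running maximum of (best subarray ending here) gives the overall maximum subarray sum.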
if nums is None or not nums:
raise ValueError("Input sequence should not be empty")
lowerCAmelCase_ : Dict = nums[0]
for i in range(1 , len(snake_case__)):
lowerCAmelCase_ : Optional[int] = nums[i]
        lowerCAmelCase_ : Optional[int] = max(ans , ans + num , num)
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
_lowercase = int(input('''Enter number of elements : ''').strip())
_lowercase = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
| 659 | 0 |
"""simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_: Optional[int] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class a__ ( _a , unittest.TestCase ):
snake_case_ = XLMProphetNetTokenizer
snake_case_ = False
snake_case_ = True
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = XLMProphetNetTokenizer(_UpperCAmelCase, keep_accents=_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = "[PAD]"
lowercase__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ), _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], "[PAD]" )
self.assertEqual(vocab_keys[1], "[CLS]" )
self.assertEqual(vocab_keys[-1], "j" )
self.assertEqual(len(_UpperCAmelCase ), 1012 )
def snake_case__ ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 1012 )
    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "[UNK]",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "[UNK]",
                ".",
            ],
        )
@cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [3_5389, 6672, 49, 2]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
lowercase__ = {"input_ids": [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase, model_name="microsoft/xprophetnet-large-wiki100-cased", revision="1acad1643ddd54a44df6a1b797ada8373685d90e", )
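        # A minimal sketch of the fairseq-offset convention exercised above: raw
        # SentencePiece ids are shifted because special tokens occupy the first
        # slots of the model vocab (illustrative line, not part of the test):
        #     model_ids = [spm_id + tokenizer.fairseq_offset for spm_id in spm_ids]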
| 668 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
    def test_added_tokens_do_lower_case(self):
        pass
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_a = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_a)

                tokens_a = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_a), 0)

                text_a = tokenizer.decode(ids)
                self.assertIsInstance(text_a, str)

                self.assertEqual(text_a.replace(" ", ""), output_text)
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
    def test_maximum_encoding_length_pair_input(self):
        pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
    def test_pretokenized_inputs(self):
        pass
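

# Standalone sketch of the character-level scheme the tests above rely on
# (illustrative only; the real MgpstrTokenizer adds more behaviour on top):
chars = ["[GO]", "[s]"] + list("0123456789abcdefghijklmnopqrstuvwxyz")
char_vocab = {token: idx for idx, token in enumerate(chars)}
ids = [char_vocab[c] for c in "tester"]      # one id per character
decoded = "".join(chars[i] for i in ids)
assert decoded == "tester"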
| 668 | 1 |
"""simple docstring"""
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None


def create_linked_list() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
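    # Illustrative usage of the doubly linked list above (values chosen for the
    # sketch, not part of the original file):
    ll = LinkedList()
    for value in (1, 2, 3):
        ll.insert(value)
    ll.insert_at_position(position=2, value=9)  # list is now: 1 9 2 3
    ll.delete_value(2)
    assert list(ll) == [1, 9, 3]
    assert 9 in ll and 2 not in ll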
| 661 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root: TreeNode | None) -> bool:
    """Return True if the tree rooted at `root` is a valid binary search tree."""

    # Every node must be a TreeNode whose data is float-convertible.
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError("Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))
if __name__ == "__main__":
import doctest
doctest.testmod()
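    # Quick illustrative check of the validator above (example trees chosen here):
    valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
    invalid = TreeNode(2.0, TreeNode(3.0), TreeNode(1.0))
    assert is_binary_search_tree(valid)
    assert not is_binary_search_tree(invalid)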
| 14 | 0 |
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the arc length of `fnc` on [x_start, x_end] using `steps` chords."""
    xa = x_start
    fxa = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        xa_next = (x_end - x_start) / steps + xa
        fxa_next = fnc(xa_next)
        length += math.hypot(xa_next - xa, fxa_next - fxa)

        # Increment step
        xa = xa_next
        fxa = fxa_next

    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100_000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
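
    # Sanity check (illustrative): for the straight line f(x) = x on [0, 1] the
    # chord-sum length is exactly sqrt(2), so the approximation should match it.
    assert abs(line_length(lambda x: x, 0, 1, 100) - math.sqrt(2)) < 1e-9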
| 688 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , _UpperCAmelCase )
self.assertIsInstance(encoding.boxes , _UpperCAmelCase )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
    def test_call_numpy(self):
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
    def test_call_pytorch(self):
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
    def test_LayoutLMv3_integration_test(self):
"""simple docstring"""
__lowercase = LayoutLMvaImageProcessor()
from datasets import load_dataset
__lowercase = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__lowercase = Image.open(ds[0]['file'] ).convert('RGB' )
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__lowercase = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__lowercase = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 
6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _UpperCAmelCase )
self.assertListEqual(encoding.boxes , _UpperCAmelCase )
# with apply_OCR = False
__lowercase = LayoutLMvaImageProcessor(apply_ocr=_UpperCAmelCase )
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
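        # Usage sketch for the OCR toggle exercised above (illustrative; mirrors
        # the image-processor API used in this test):
        #     processor = LayoutLMvaImageProcessor(apply_ocr=False)  # skip Tesseract
        #     enc = processor(image, return_tensors="pt")  # pixel_values only,
        #                                                  # no words/boxes fields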
| 688 | 1 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False
snake_case_ = logging.get_logger(__name__) # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)
    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self):
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.')
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n')
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
lowercase__ : List[str] = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
if len(snake_case_) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.')
lowercase__ : Any = (
Path(snake_case_).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
)
lowercase__ : str = path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
if not self._testing:
cookiecutter(str(snake_case_))
else:
with open(self._testing_file , 'r') as configuration_file:
lowercase__ : Dict = json.load(snake_case_)
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path) , no_input=snake_case_ , extra_context=snake_case_ , )
lowercase__ : Any = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
# Retrieve configuration
with open(directory + '/configuration.json' , 'r') as configuration_file:
lowercase__ : Tuple = json.load(snake_case_)
lowercase__ : str = configuration['lowercase_modelname']
lowercase__ : Union[str, Any] = configuration['generate_tensorflow_pytorch_and_flax']
os.remove(f"""{directory}/configuration.json""")
lowercase__ : Optional[int] = 'PyTorch' in generate_tensorflow_pytorch_and_flax
lowercase__ : Dict = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
lowercase__ : Any = 'Flax' in generate_tensorflow_pytorch_and_flax
lowercase__ : Optional[Any] = f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
os.makedirs(snake_case_ , exist_ok=snake_case_)
os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=snake_case_)
# Tests require submodules as they have parent imports
with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , 'w'):
pass
shutil.move(
f"""{directory}/__init__.py""" , f"""{model_dir}/__init__.py""" , )
shutil.move(
f"""{directory}/configuration_{lowercase_model_name}.py""" , f"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
def remove_copy_lines(a):
with open(snake_case_ , 'r') as f:
lowercase__ : Union[str, Any] = f.readlines()
with open(snake_case_ , 'w') as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(snake_case_)
if output_pytorch:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""")
shutil.move(
f"""{directory}/modeling_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""")
os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""")
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""")
shutil.move(
f"""{directory}/modeling_tf_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""")
os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""")
if output_flax:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""")
shutil.move(
f"""{directory}/modeling_flax_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""")
os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""")
shutil.move(
f"""{directory}/{lowercase_model_name}.md""" , f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
shutil.move(
f"""{directory}/tokenization_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(a , a , a):
# Create temp file
lowercase__ , lowercase__ : Dict = mkstemp()
lowercase__ : List[Any] = False
with fdopen(snake_case_ , 'w') as new_file:
with open(snake_case_) as old_file:
for line in old_file:
new_file.write(snake_case_)
if line_to_copy_below in line:
lowercase__ : int = True
for line_to_copy in lines_to_copy:
new_file.write(snake_case_)
if not line_found:
raise ValueError(f"""Line {line_to_copy_below} was not found in file.""")
# Copy the file permissions from the old file to the new file
copymode(snake_case_ , snake_case_)
# Remove original file
remove(snake_case_)
# Move new file
move(snake_case_ , snake_case_)
def skip_units(a):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(a):
with open(snake_case_) as datafile:
lowercase__ : int = []
lowercase__ : str = False
lowercase__ : List[str] = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
lowercase__ : Any = line.split('\"')[1]
lowercase__ : Tuple = skip_units(snake_case_)
elif "# Below: " in line and "##" not in line:
lowercase__ : str = line.split('\"')[1]
lowercase__ : Any = skip_units(snake_case_)
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(snake_case_ , snake_case_ , snake_case_)
lowercase__ : Optional[Any] = []
elif "# Replace with" in line and "##" not in line:
lowercase__ : Optional[int] = []
elif "##" not in line:
lines_to_copy.append(snake_case_)
remove(snake_case_)
replace_in_files(f"""{directory}/to_replace_{lowercase_model_name}.py""")
os.rmdir(snake_case_)
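

# Standalone sketch of the copy-and-splice pattern used by `replace` above:
# stream the original file into a temp file, emit the extra lines right below a
# marker line, then move the temp file into place. Illustrative helper only,
# not part of the original command:
def splice_below(path, marker, new_lines):
    from os import fdopen
    from shutil import move
    from tempfile import mkstemp

    fd, tmp_path = mkstemp()
    with fdopen(fd, "w") as new_file, open(path) as old_file:
        for line in old_file:
            new_file.write(line)
            if marker in line:
                new_file.writelines(new_lines)
    move(tmp_path, path)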
| 164 |
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so it needs to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")


# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('''pretraining''', '''MODEL_FOR_PRETRAINING_MAPPING_NAMES''', '''AutoModelForPreTraining'''),
('''feature-extraction''', '''MODEL_MAPPING_NAMES''', '''AutoModel'''),
('''audio-classification''', '''MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForAudioClassification'''),
('''text-generation''', '''MODEL_FOR_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForCausalLM'''),
('''automatic-speech-recognition''', '''MODEL_FOR_CTC_MAPPING_NAMES''', '''AutoModelForCTC'''),
('''image-classification''', '''MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForImageClassification'''),
('''image-segmentation''', '''MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES''', '''AutoModelForImageSegmentation'''),
('''fill-mask''', '''MODEL_FOR_MASKED_LM_MAPPING_NAMES''', '''AutoModelForMaskedLM'''),
('''object-detection''', '''MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES''', '''AutoModelForObjectDetection'''),
(
'''zero-shot-object-detection''',
'''MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES''',
'''AutoModelForZeroShotObjectDetection''',
),
('''question-answering''', '''MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForQuestionAnswering'''),
('''text2text-generation''', '''MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForSeq2SeqLM'''),
('''text-classification''', '''MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForSequenceClassification'''),
('''automatic-speech-recognition''', '''MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES''', '''AutoModelForSpeechSeq2Seq'''),
(
'''table-question-answering''',
'''MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForTableQuestionAnswering''',
),
('''token-classification''', '''MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForTokenClassification'''),
('''multiple-choice''', '''MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES''', '''AutoModelForMultipleChoice'''),
(
'''next-sentence-prediction''',
'''MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES''',
'''AutoModelForNextSentencePrediction''',
),
(
'''audio-frame-classification''',
'''MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForAudioFrameClassification''',
),
('''audio-xvector''', '''MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES''', '''AutoModelForAudioXVector'''),
(
'''document-question-answering''',
'''MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForDocumentQuestionAnswering''',
),
(
'''visual-question-answering''',
'''MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForVisualQuestionAnswering''',
),
    ('''image-to-text''', '''MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES''', '''AutoModelForVision2Seq'''),
(
'''zero-shot-image-classification''',
'''MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForZeroShotImageClassification''',
),
('''depth-estimation''', '''MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES''', '''AutoModelForDepthEstimation'''),
('''video-classification''', '''MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForVideoClassification'''),
('''mask-generation''', '''MODEL_FOR_MASK_GENERATION_MAPPING_NAMES''', '''AutoModelForMaskGeneration'''),
]
def camel_case_split(identifier: str) -> list:
    """Split a camel-cased name like `TFBertModel` into its component words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
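
# Example of the split performed above (illustrative):
#     camel_case_split("TFBertForMaskedLM") -> ["TF", "Bert", "For", "Masked", "LM"]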
def get_frameworks_table() -> pd.DataFrame:
'''simple docstring'''
_UpperCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_UpperCAmelCase = {
config.replace("Config" , "" ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
_UpperCAmelCase = collections.defaultdict(__lowercase )
_UpperCAmelCase = collections.defaultdict(__lowercase )
_UpperCAmelCase = collections.defaultdict(__lowercase )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(__lowercase ):
_UpperCAmelCase = None
if _re_tf_models.match(__lowercase ) is not None:
_UpperCAmelCase = tf_models
_UpperCAmelCase = _re_tf_models.match(__lowercase ).groups()[0]
elif _re_flax_models.match(__lowercase ) is not None:
_UpperCAmelCase = flax_models
_UpperCAmelCase = _re_flax_models.match(__lowercase ).groups()[0]
elif _re_pt_models.match(__lowercase ) is not None:
_UpperCAmelCase = pt_models
_UpperCAmelCase = _re_pt_models.match(__lowercase ).groups()[0]
if lookup_dict is not None:
while len(__lowercase ) > 0:
if attr_name in model_prefix_to_model_type:
_UpperCAmelCase = True
break
# Try again after removing the last word in the name
_UpperCAmelCase = "".join(camel_case_split(__lowercase )[:-1] )
_UpperCAmelCase = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
_UpperCAmelCase = list(__lowercase )
all_models.sort()
_UpperCAmelCase = {"model_type": all_models}
_UpperCAmelCase = [pt_models[t] for t in all_models]
_UpperCAmelCase = [tf_models[t] for t in all_models]
_UpperCAmelCase = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
_UpperCAmelCase = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
_UpperCAmelCase = "AutoProcessor"
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
_UpperCAmelCase = "AutoTokenizer"
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
_UpperCAmelCase = "AutoFeatureExtractor"
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
_UpperCAmelCase = "AutoTokenizer"
_UpperCAmelCase = [processors[t] for t in all_models]
return pd.DataFrame(__lowercase )
def update_pipeline_and_auto_class_table(table: dict) -> dict:
'''simple docstring'''
_UpperCAmelCase = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
_UpperCAmelCase = [model_mapping, f'TF_{model_mapping}', f'FLAX_{model_mapping}']
_UpperCAmelCase = [auto_class, f'TF_{auto_class}', f'Flax_{auto_class}']
# Loop through all three frameworks
for module, cls, mapping in zip(__lowercase , __lowercase , __lowercase ):
# The type of pipeline may not exist in this framework
if not hasattr(__lowercase , __lowercase ):
continue
# First extract all model_names
_UpperCAmelCase = []
for name in getattr(__lowercase , __lowercase ).values():
if isinstance(__lowercase , __lowercase ):
model_names.append(__lowercase )
else:
model_names.extend(list(__lowercase ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def update_metadata(token: str, commit_sha: str):
'''simple docstring'''
_UpperCAmelCase = get_frameworks_table()
_UpperCAmelCase = Dataset.from_pandas(__lowercase )
_UpperCAmelCase = hf_hub_download(
"huggingface/transformers-metadata" , "pipeline_tags.json" , repo_type="dataset" , token=__lowercase )
_UpperCAmelCase = Dataset.from_json(__lowercase )
_UpperCAmelCase = {
tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
for i in range(len(__lowercase ) )
}
_UpperCAmelCase = update_pipeline_and_auto_class_table(__lowercase )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
_UpperCAmelCase = sorted(table.keys() )
_UpperCAmelCase = pd.DataFrame(
{
"model_class": model_classes,
"pipeline_tag": [table[m][0] for m in model_classes],
"auto_class": [table[m][1] for m in model_classes],
} )
_UpperCAmelCase = Dataset.from_pandas(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(__lowercase , "frameworks.json" ) )
tags_dataset.to_json(os.path.join(__lowercase , "pipeline_tags.json" ) )
if commit_sha is not None:
_UpperCAmelCase = (
f'Update with commit {commit_sha}\n\nSee: '
f'https://github.com/huggingface/transformers/commit/{commit_sha}'
)
else:
_UpperCAmelCase = "Update"
upload_folder(
repo_id="huggingface/transformers-metadata" , folder_path=__lowercase , repo_type="dataset" , token=__lowercase , commit_message=__lowercase , )
def check_pipeline_tags():
'''simple docstring'''
_UpperCAmelCase = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
_UpperCAmelCase = transformers_module.pipelines.SUPPORTED_TASKS
_UpperCAmelCase = []
for key in pipeline_tasks:
if key not in in_table:
_UpperCAmelCase = pipeline_tasks[key]["pt"]
if isinstance(__lowercase , (list, tuple) ):
_UpperCAmelCase = model[0]
_UpperCAmelCase = model.__name__
if model not in in_table.values():
missing.append(__lowercase )
if len(__lowercase ) > 0:
_UpperCAmelCase = ", ".join(__lowercase )
raise ValueError(
"The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
f'`utils/update_metadata.py`: {msg}. Please add them!' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--token''', type=str, help='''The token to use to push to the transformers-metadata dataset.''')
parser.add_argument('''--commit_sha''', type=str, help='''The sha of the commit going with this update.''')
parser.add_argument('''--check-only''', action='''store_true''', help='''Activate to just check all pipelines are present.''')
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 236 | 0 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_a = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 700 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    """Bundles a CLAP feature extractor and a Roberta tokenizer into one processor."""

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            # Merge the audio features into the text encoding when both are given.
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
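
    # Usage sketch (illustrative; the checkpoint name and sampling rate below are
    # assumptions, not taken from this file):
    #     processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
    #     inputs = processor(text=["a dog barking"], audios=waveform,
    #                        sampling_rate=48_000, return_tensors="pt")
    #     # `inputs` carries the tokenizer fields plus "input_features".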
| 29 | 0 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
lowerCAmelCase__ = logging.getLogger(__name__)
lowerCAmelCase__ = '''Hello world! cécé herlolip'''
BertAbsConfig = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
def convert_bertabs_checkpoints(path, pytorch_dump_folder_path):
"""simple docstring"""
__lowercase = BertAbsConfig(
temp_dir='''.''' , finetune_bert=A__ , large=A__ , share_emb=A__ , use_bert_emb=A__ , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
__lowercase = torch.load(A__ , lambda A__ , A__ : storage )
__lowercase = AbsSummarizer(A__ , torch.device('''cpu''' ) , A__ )
original.eval()
__lowercase = BertAbsSummarizer(A__ , torch.device('''cpu''' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outputs are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''' )
__lowercase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
# prepare the model inputs
__lowercase = tokenizer.encode('''This is sample éàalj\'-.''' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(A__ )) )
__lowercase = torch.tensor(A__ ).unsqueeze(0 )
__lowercase = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(A__ )) )
__lowercase = torch.tensor(A__ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__lowercase = encoder_input_ids
__lowercase = decoder_input_ids
__lowercase = __lowercase = None
__lowercase = None
__lowercase = __lowercase = None
__lowercase = __lowercase = None
__lowercase = None
# The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__lowercase = original(A__ , A__ , A__ , A__ , A__ , A__ , A__ )[0]
__lowercase = original.generator(A__ )
__lowercase = new_model(
A__ , A__ , A__ , A__ , A__ )[0]
__lowercase = new_model.generator(A__ )
__lowercase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('''Maximum absolute difference between outputs: {:.2f}'''.format(A__))
__lowercase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('''Maximum absolute difference between outputs: {:.2f}'''.format(A__))
__lowercase = torch.allclose(A__ , A__ , atol=1e-3 )
if are_identical:
logging.info('''all weights are equal up to 1e-3''' )
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
lowerCAmelCase__ = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 41 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = 'blenderbot-small'
SCREAMING_SNAKE_CASE : int = ['past_key_values']
SCREAMING_SNAKE_CASE : List[str] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__(
        self,
        vocab_size=50_265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2_048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2_048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
__lowercase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__lowercase = {0: '''batch'''}
__lowercase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
__lowercase = {0: '''batch''', 1: '''decoder_sequence'''}
__lowercase = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowercase__ ,direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowercase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__lowercase , __lowercase = self.num_layers
for i in range(lowercase__ ):
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
__lowercase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
__lowercase = super().outputs
else:
__lowercase = super(lowercase__ ,self ).outputs
if self.use_past:
__lowercase , __lowercase = self.num_layers
for i in range(lowercase__ ):
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
# Generate decoder inputs
__lowercase = seq_length if not self.use_past else 1
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
__lowercase = dict(**lowercase__ ,**lowercase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__lowercase , __lowercase = common_inputs['''input_ids'''].shape
__lowercase = common_inputs['''decoder_input_ids'''].shape[1]
__lowercase , __lowercase = self.num_attention_heads
__lowercase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase = decoder_seq_length + 3
__lowercase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowercase = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(lowercase__ ,lowercase__ )] ,dim=1 )
__lowercase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__lowercase , __lowercase = self.num_layers
__lowercase = min(lowercase__ ,lowercase__ )
__lowercase = max(lowercase__ ,lowercase__ ) - min_num_layers
__lowercase = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(lowercase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
) )
# TODO: test this.
__lowercase = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(lowercase__ ,lowercase__ ):
common_inputs["past_key_values"].append((torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) )
return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
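# A minimal export sketch (hedged: the checkpoint name and task value are
# assumptions, not taken from this file, and the standard `transformers.onnx`
# imports are expected to be present at module top):
if __name__ == "__main__":
    from transformers import AutoConfig, AutoTokenizer

    model_config = AutoConfig.from_pretrained("facebook/blenderbot_small-90M")
    onnx_config = BlenderbotSmallOnnxConfig(model_config, task="seq2seq-lm")
    tok = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
    dummy = onnx_config.generate_dummy_inputs(tok, framework=TensorType.PYTORCH)
    print(sorted(dummy.keys()))  # input_ids, attention_mask, decoder_*, ...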
| 41 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = "\n    Examples:\n    ```py\n    >>> from PIL import Image\n    >>> import torch\n    >>> from diffusers import DiffusionPipeline\n    >>> from diffusers.utils import export_to_gif, load_image\n\n    >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n    >>> repo = \"openai/shap-e-img2img\"\n    >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n    >>> pipe = pipe.to(device)\n\n    >>> guidance_scale = 3.0\n    >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n    >>> image = load_image(image_url).convert(\"RGB\")\n\n    >>> images = pipe(\n    ...     image,\n    ...     guidance_scale=guidance_scale,\n    ...     num_inference_steps=64,\n    ...     frame_size=256,\n    ... ).images\n\n    >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n    ```\n"
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """Output class holding the rendered images."""

    images: Union[List[PIL.Image.Image], np.ndarray]


# Pipeline class name reconstructed from the example docstring (Shap-E
# image-to-image); method and variable names below were restored from their usages.
class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()
        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)
        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)
        image = image.to(dtype=self.image_encoder.dtype, device=device)
        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])
        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                "`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or"
                f" `List[torch.Tensor]` but is {type(image)}"
            )
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)
        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding
            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample
        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)
        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)
        images = torch.stack(images)
        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")
        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]
        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()
        if not return_dict:
            return (images,)
        return ShapEPipelineOutput(images=images)
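# Standalone sketch of the classifier-free guidance update used in __call__
# above (illustrative values, independent of the pipeline):
if __name__ == "__main__":
    noise_pred_uncond = torch.zeros(1, 4)
    noise_pred_cond = torch.ones(1, 4)
    guidance_scale = 3.0
    guided = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
    print(guided)  # all 3.0: guidance extrapolates beyond the conditional prediction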
| 253 |
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Reorder a 32-char bit/hex string into little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Format an integer as a little-endian 8-char hex string."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Convert the message to a bit string padded to a multiple of 512 bits."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the bit string into 512-bit blocks of sixteen 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT restricted to 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Left-rotate a 32-bit integer by `shift` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the MD5 digest of `message` as a 32-char hex byte string."""
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]
    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0
        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))
        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)
    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
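# Quick sanity check (hedged): the digest of b"hello" should match the standard
# MD5 test vector. Run this file directly to verify.
if __name__ == "__main__":
    assert md5_me(b"hello") == b"5d41402abc4b2a76b9719d911017c592"
    print(md5_me(b"The quick brown fox jumps over the lazy dog").decode())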
| 253 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    """Processor wrapping a SpeechT5 feature extractor and a SpeechT5 tokenizer."""

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?")
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?")
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None
        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)
        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.")
        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None
        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # Temporarily swap feature_size so the spectrogram labels pad correctly.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
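# A hedged usage sketch (the checkpoint name is an assumption, not taken from
# this module; requires network access to download the processor files):
if __name__ == "__main__":
    from transformers import SpeechT5Processor

    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
    inputs = processor(text="Hello world", return_tensors="pt")
    print(inputs["input_ids"].shape)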
| 104 |
def is_palindrome(n) -> bool:
    """Return True if `n` reads the same forwards and backwards."""
    return str(n) == str(n)[::-1]


def sum_reverse(n) -> int:
    """Return n plus its decimal reversal."""
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10_000) -> int:
    """Count Lychrel candidates below `limit` (Project Euler 55: 50 iterations)."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 629 | 0 |
'''simple docstring'''
import math
def decimal_to_octal(num: int) -> str:
    """Convert a non-negative decimal integer to its octal string, e.g. 8 -> '0o10'."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"
def main():
    """Print a few decimal-to-octal conversions."""
print('''\n2 in octal is:''' )
print(decimal_to_octal(2 ) ) # = 2
print('''\n8 in octal is:''' )
print(decimal_to_octal(8 ) ) # = 10
print('''\n65 in octal is:''' )
print(decimal_to_octal(65 ) ) # = 101
print('''\n216 in octal is:''' )
print(decimal_to_octal(2_16 ) ) # = 330
print('''\n512 in octal is:''' )
print(decimal_to_octal(5_12 ) ) # = 1000
print('''\n''' )
if __name__ == "__main__":
main()
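# Hedged cross-check against Python's built-in formatter (both should agree
# for positive integers):
if __name__ == "__main__":
    for value in (2, 8, 65, 216, 512):
        assert decimal_to_octal(value) == oct(value)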
| 496 |
'''simple docstring'''
def hexagonal_numbers(length: int) -> list:
    """Return the first `length` hexagonal numbers, n * (2n - 1) for n = 0..length-1."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
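# Illustration (assumes hexagonal_numbers above): the n-th hexagonal number is
# n * (2n - 1), so the first five are 0, 1, 6, 15, 28.
if __name__ == "__main__":
    assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]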
| 496 | 1 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}


class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    # Parameter names reconstructed from the attribute assignments below.
    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
f'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
| 18 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
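# A small illustration (not part of the original module) of the attribute_map
# aliases defined above:
if __name__ == "__main__":
    config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=64)
    print(config.hidden_size, config.num_hidden_layers)  # 64 2, via attribute_map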
| 18 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
# Class and attribute names restored from the pipeline-test mixin conventions.
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)

    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):  # class name restored from context
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
            [0.36968392, 0.27025372, 0.32446766, 0.28379387, 0.36363274, 0.30733347, 0.27100027, 0.27054125, 0.25536096]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-2
    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [0.18681869, 0.33907816, 0.5361276, 0.14432865, -0.02856611, -0.73941123, 0.23397987, 0.47322682, -0.37823164]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [0.18539645, 0.33987248, 0.5378559, 0.14437142, -0.02455261, -0.7338317, 0.23990755, 0.47356272, -0.3786505]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs)
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
| 229 |
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    """Convert a pre-processed corpus and/or a TensorFlow Transformer-XL checkpoint to PyTorch."""
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--tf_checkpoint_path""",
default="""""",
type=str,
help="""An optional path to a TensorFlow checkpoint path to be converted.""",
)
parser.add_argument(
"""--transfo_xl_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--transfo_xl_dataset_file""",
default="""""",
type=str,
help="""An optional dataset file to be converted in a vocabulary.""",
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 229 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


# The class name below was reconstructed from the file's structure (a DeiT-style
# resize + center-crop + rescale + normalize processor); treat it as an assumption.
class DeiTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample=PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample=PIL.Image.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample=None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
def lowerCAmelCase_ (lowerCAmelCase__: Union[str, Any] , lowerCAmelCase__: Optional[int] ):
"""simple docstring"""
UpperCAmelCase_: List[str] = 0
UpperCAmelCase_: Tuple = len(lowerCAmelCase__ ) - 1
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
UpperCAmelCase_: Dict = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(lowerCAmelCase__ ):
return None
UpperCAmelCase_: str = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
UpperCAmelCase_: int = left
UpperCAmelCase_: str = point
elif point > right:
UpperCAmelCase_: List[str] = right
UpperCAmelCase_: Optional[int] = point
else:
if item < current_item:
UpperCAmelCase_: Optional[Any] = point - 1
else:
UpperCAmelCase_: Any = point + 1
return None
def lowerCAmelCase_ (lowerCAmelCase__: Dict , lowerCAmelCase__: List[Any] , lowerCAmelCase__: Any , lowerCAmelCase__: Any ):
"""simple docstring"""
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
UpperCAmelCase_: Union[str, Any] = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(lowerCAmelCase__ ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
elif point > right:
return interpolation_search_by_recursion(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , point - 1 )
else:
return interpolation_search_by_recursion(
lowerCAmelCase__ , lowerCAmelCase__ , point + 1 , lowerCAmelCase__ )
def lowerCAmelCase_ (lowerCAmelCase__: Dict ):
"""simple docstring"""
if collection != sorted(lowerCAmelCase__ ):
raise ValueError("""Collection must be ascending sorted""" )
return True
if __name__ == "__main__":
import sys
a : Optional[Any] = 0
if debug == 1:
a : int = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit('Sequence must be ascending sorted to apply interpolation search')
a : int = 67
a : Any = interpolation_search(collection, target)
if result is not None:
print(F'''{target} found at positions: {result}''')
else:
print('Not found')
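# Quick demonstration (illustrative data): interpolation search runs in
# O(log log n) on uniformly distributed keys and degrades towards O(n) otherwise.
if __name__ == "__main__":
    data = list(range(0, 1_000, 5))
    assert interpolation_search(data, 500) == 100
    assert interpolation_search(data, 7) is None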
| 556 | 1 |
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2_000_000) -> int:
    """Find the grid area whose rectangle count T(m) * T(n) is closest to `target`."""
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(f'''{solution() = }''')
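# Background check (illustrative): an m x n grid contains T(m) * T(n) rectangles,
# where T(k) = k(k + 1) / 2; the Project Euler statement's 2 x 3 grid gives 18.
if __name__ == "__main__":
    def triangle(k: int) -> int:
        return k * (k + 1) // 2

    assert triangle(2) * triangle(3) == 18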
| 716 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
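

# Minimal usage sketch (illustrative, assuming a transformers install):
#   config = UperNetConfig()                 # falls back to the default ResNet backbone
#   as_dict = config.to_dict()               # nested backbone_config is a plain dict here
#   restored = UperNetConfig(backbone_config=as_dict["backbone_config"])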
| 658 | 0 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    # Pair-valued padding (e.g. spans) gets a trailing dimension of 2.
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, sequence_length - len(tensor[:sequence_length]) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, sequence_length - len(tensor[:sequence_length]) :] = tensor[:sequence_length]

    return out_tensor.tolist()
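

# Illustrative behaviour of the helper above (right padding to length 4):
#   padding_tensor([[1, 2], [3, 4, 5]], -1, "right", 4)
#     -> [[1, 2, -1, -1], [3, 4, 5, -1]]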
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
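

# Examples: is_punctuation("!") is True via the ASCII range check,
# is_punctuation("\u3002") (ideographic full stop) is True via the Unicode
# "P*" category, and is_punctuation("a") is False.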
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
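

# Note: DataCollatorMixin dispatches __call__ to torch_call when
# return_tensors == "pt", so the collator is used like any HF collator
# (sketch, assuming a LUKE-style tokenizer and features with "entity_ids"):
#   collator = DataCollatorForLukeTokenClassification(tokenizer=tokenizer)
#   batch = collator(features)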
| 39 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
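# With the lazy module installed, a line like
#   from transformers.models.maskformer import MaskFormerConfig
# only triggers the heavy submodule import on first attribute access.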
| 458 | 0 |
"""Viterbi algorithm (dynamic programming) with input validation."""
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely sequence of hidden states for the observations."""
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()

    return result
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(
        initial_probabilities, transition_probabilities, emission_probabilities
    )


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(
    _object: Any, var_name: str, value_type: type, nested: bool = False
) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
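
# Usage sketch (values illustrative; the classic healthy/sick HMM from the
# Viterbi literature):
#   observations = ["normal", "cold", "dizzy"]
#   states = ["healthy", "sick"]
#   start_p = {"healthy": 0.6, "sick": 0.4}
#   trans_p = {"healthy": {"healthy": 0.7, "sick": 0.3},
#              "sick": {"healthy": 0.4, "sick": 0.6}}
#   emit_p = {"healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#             "sick": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}}
#   viterbi(observations, states, start_p, trans_p, emit_p)
#     -> ["healthy", "healthy", "sick"]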
| 715 |
'''Tests for the Kandinsky 2.2 image-to-image pipeline.'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 419 | 0 |
class Graph:
    """Directed graph stored as an adjacency dict, traversed with recursive DFS."""

    def __init__(self) -> None:
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=" ")

        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("DFS:")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 68 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    iam_client = boto3.client("iam")

    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")
def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
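

# e.g. _get_iam_role_arn("accelerate_sagemaker_execution_role") returns an ARN
# of the form "arn:aws:iam::<account-id>:role/accelerate_sagemaker_execution_role".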
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )

        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )

    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
| 68 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 255 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 255 | 1 |
'''Tests for helper functions in datasets.utils.py_utils.'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1


@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)

        sn = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False, num_proc=num_proc), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn, num_proc=num_proc)

    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)

    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc",
    [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ],
)
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_temp_seed_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_temp_seed_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_temp_seed_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data" , [{}] )
def lowerCamelCase__ ( A_ ):
UpperCAmelCase_ = NestedDataStructure(A_ ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" , [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] , )
def lowerCamelCase__ ( A_ , A_ ):
UpperCAmelCase_ = NestedDataStructure(A_ ).flatten()
assert output == expected_output
def lowerCamelCase__ ( ):
UpperCAmelCase_ = A(x=1 , y="foobar" )
UpperCAmelCase_ = {"x": 1, "y": "foobar"}
assert asdict(A_ ) == expected_output
UpperCAmelCase_ = {"a": {"b": A(x=10 , y="foo" )}, "c": [A(x=20 , y="bar" )]}
UpperCAmelCase_ = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
assert asdict(A_ ) == expected_output
with pytest.raises(A_ ):
asdict([1, A(x=10 , y="foo" )] )
def lowerCamelCase__ ( A_ ):
return text.split()
def lowerCamelCase__ ( A_ ):
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def lowerCamelCase__ ( ):
with Pool(2 ) as pool:
UpperCAmelCase_ = list(iflatmap_unordered(A_ , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(A_ ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
UpperCAmelCase_ = list(iflatmap_unordered(A_ , _split_text , kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(A_ ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
UpperCAmelCase_ = []
for yield_time, content in iflatmap_unordered(
A_ , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
out.append(A_ )
assert out.count("a" ) == 2
assert out.count("b" ) == 2
assert len(A_ ) == 4
| 660 |
'''Tests for the Autoformer time-series forecasting model.'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class lowercase_ ( unittest.TestCase ):
def lowerCamelCase_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCamelCase__ )
UpperCAmelCase_ = prepare_batch()
with torch.no_grad():
UpperCAmelCase_ = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
UpperCAmelCase_ = torch.Size(
(6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , UpperCamelCase__ )
UpperCAmelCase_ = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def lowerCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCamelCase__ )
UpperCAmelCase_ = prepare_batch("val-batch.pt" )
with torch.no_grad():
UpperCAmelCase_ = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
UpperCAmelCase_ = torch.Size((6_4, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , UpperCamelCase__ )
UpperCAmelCase_ = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(UpperCamelCase__ )
UpperCAmelCase_ = prepare_batch("val-batch.pt" )
with torch.no_grad():
UpperCAmelCase_ = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
UpperCAmelCase_ = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , UpperCamelCase__ )
UpperCAmelCase_ = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=UpperCamelCase__ )
UpperCAmelCase_ = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , UpperCamelCase__ , rtol=1e-1 ) )
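# A minimal standalone sketch of the reduction used in the generation test above:
# `generate` returns (batch, num_parallel_samples, prediction_length) sampled
# trajectories, and averaging over the sample dimension gives the point forecast
# that is compared against the reference values. The tensor here is synthetic.
import torch

samples = torch.randn(64, 100, 24)  # hypothetical batch of sampled future paths
mean_prediction = samples.mean(dim=1)
assert mean_prediction.shape == (64, 24)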
| 660 | 1 |
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
snake_case_ = 42
snake_case_ = jnp.floataa
snake_case_ = True
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
super().setup()
A__ : List[Any] = nn.Dense(5 , dtype=self.dtype )
def __call__( self : Optional[int] , *snake_case : Any , **snake_case : str ):
'''simple docstring'''
A__ : int = super().__call__(*snake_case , **snake_case )
A__ : str = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
snake_case_ = FlaxBigBirdForNaturalQuestionsModule
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : int, UpperCAmelCase__ : Dict, UpperCAmelCase__ : int, UpperCAmelCase__ : Dict ) ->Tuple:
def cross_entropy(UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Union[str, Any]=None ):
A__ : Optional[Any] = logits.shape[-1]
A__ : Union[str, Any] = (labels[..., None] == jnp.arange(UpperCAmelCase__ )[None]).astype("""f4""" )
A__ : Optional[Any] = jax.nn.log_softmax(UpperCAmelCase__, axis=-1 )
A__ : Dict = -jnp.sum(labels * logits, axis=-1 )
if reduction is not None:
A__ : List[Any] = reduction(UpperCAmelCase__ )
return loss
A__ : Tuple = partial(UpperCAmelCase__, reduction=jnp.mean )
A__ : Tuple = cross_entropy(UpperCAmelCase__, UpperCAmelCase__ )
A__ : List[str] = cross_entropy(UpperCAmelCase__, UpperCAmelCase__ )
A__ : List[Any] = cross_entropy(UpperCAmelCase__, UpperCAmelCase__ )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class __SCREAMING_SNAKE_CASE :
snake_case_ = "google/bigbird-roberta-base"
snake_case_ = 3000
snake_case_ = 10500
snake_case_ = 128
snake_case_ = 3
snake_case_ = 1
snake_case_ = 5
# tx_args
snake_case_ = 3E-5
snake_case_ = 0.0
snake_case_ = 20000
snake_case_ = 0.0_0_9_5
snake_case_ = "bigbird-roberta-natural-questions"
snake_case_ = "training-expt"
snake_case_ = "data/nq-training.jsonl"
snake_case_ = "data/nq-validation.jsonl"
def _UpperCamelCase ( self : str ):
'''simple docstring'''
os.makedirs(self.base_dir , exist_ok=snake_case )
A__ : str = os.path.join(self.base_dir , self.save_dir )
A__ : Optional[int] = self.batch_size_per_device * jax.device_count()
@dataclass
class __SCREAMING_SNAKE_CASE :
snake_case_ = 42
snake_case_ = 4096 # no dynamic padding on TPUs
def __call__( self : str , snake_case : Dict ):
'''simple docstring'''
A__ : Optional[int] = self.collate_fn(snake_case )
A__ : List[Any] = jax.tree_util.tree_map(snake_case , snake_case )
return batch
def _UpperCamelCase ( self : int , snake_case : Optional[Any] ):
'''simple docstring'''
A__ , A__ : Union[str, Any] = self.fetch_inputs(features["""input_ids"""] )
A__ : Union[str, Any] = {
"""input_ids""": jnp.array(snake_case , dtype=jnp.intaa ),
"""attention_mask""": jnp.array(snake_case , dtype=jnp.intaa ),
"""start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa ),
"""end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa ),
"""pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa ),
}
return batch
def _UpperCamelCase ( self : Optional[Any] , snake_case : list ):
'''simple docstring'''
A__ : str = [self._fetch_inputs(snake_case ) for ids in input_ids]
return zip(*snake_case )
def _UpperCamelCase ( self : Union[str, Any] , snake_case : list ):
'''simple docstring'''
A__ : str = [1 for _ in range(len(snake_case ) )]
while len(snake_case ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def _lowerCAmelCase ( UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : List[str]=None ) ->Dict:
if seed is not None:
A__ : Optional[int] = dataset.shuffle(seed=UpperCAmelCase__ )
for i in range(len(UpperCAmelCase__ ) // batch_size ):
A__ : List[str] = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(UpperCAmelCase__ )
@partial(jax.pmap, axis_name="""batch""" )
def _lowerCAmelCase ( UpperCAmelCase__ : Dict, UpperCAmelCase__ : Optional[Any], **UpperCAmelCase__ : str ) ->Dict:
def loss_fn(UpperCAmelCase__ : Union[str, Any] ):
A__ : Any = model_inputs.pop("""start_labels""" )
A__ : int = model_inputs.pop("""end_labels""" )
A__ : Any = model_inputs.pop("""pooled_labels""" )
A__ : List[Any] = state.apply_fn(**UpperCAmelCase__, params=UpperCAmelCase__, dropout_rng=UpperCAmelCase__, train=UpperCAmelCase__ )
A__ , A__ , A__ : List[str] = outputs
return state.loss_fn(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
A__ , A__ : Optional[int] = jax.random.split(UpperCAmelCase__ )
A__ : Optional[Any] = jax.value_and_grad(UpperCAmelCase__ )
A__ , A__ : Union[str, Any] = grad_fn(state.params )
A__ : Dict = jax.lax.pmean({"""loss""": loss}, axis_name="""batch""" )
A__ : int = jax.lax.pmean(UpperCAmelCase__, """batch""" )
A__ : Any = state.apply_gradients(grads=UpperCAmelCase__ )
return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="""batch""" )
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[int], **UpperCAmelCase__ : List[str] ) ->Optional[Any]:
A__ : Any = model_inputs.pop("""start_labels""" )
A__ : Tuple = model_inputs.pop("""end_labels""" )
A__ : str = model_inputs.pop("""pooled_labels""" )
A__ : Dict = state.apply_fn(**UpperCAmelCase__, params=state.params, train=UpperCAmelCase__ )
A__ , A__ , A__ : str = outputs
A__ : Union[str, Any] = state.loss_fn(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
A__ : Optional[int] = jax.lax.pmean({"""loss""": loss}, axis_name="""batch""" )
return metrics
class __SCREAMING_SNAKE_CASE ( train_state.TrainState ):
snake_case_ = struct.field(pytree_node=UpperCamelCase )
@dataclass
class __SCREAMING_SNAKE_CASE :
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = None
def _UpperCamelCase ( self : Optional[int] , snake_case : Optional[Any] , snake_case : str , snake_case : int , snake_case : int=None ):
'''simple docstring'''
A__ : Tuple = model.params
A__ : Dict = TrainState.create(
apply_fn=model.__call__ , params=snake_case , tx=snake_case , loss_fn=snake_case , )
if ckpt_dir is not None:
A__ , A__ , A__ , A__ , A__ : str = restore_checkpoint(snake_case , snake_case )
A__ : Dict = {
"""lr""": args.lr,
"""init_lr""": args.init_lr,
"""warmup_steps""": args.warmup_steps,
"""num_train_steps""": num_train_steps,
"""weight_decay""": args.weight_decay,
}
A__ , A__ : int = build_tx(**snake_case )
A__ : Tuple = train_state.TrainState(
step=snake_case , apply_fn=model.__call__ , params=snake_case , tx=snake_case , opt_state=snake_case , )
A__ : Union[str, Any] = args
A__ : Any = data_collator
A__ : Any = lr
A__ : Dict = params
A__ : Union[str, Any] = jax_utils.replicate(snake_case )
return state
def _UpperCamelCase ( self : Optional[Any] , snake_case : Dict , snake_case : Union[str, Any] , snake_case : int ):
'''simple docstring'''
A__ : Tuple = self.args
A__ : List[str] = len(snake_case ) // args.batch_size
A__ : List[str] = jax.random.PRNGKey(0 )
A__ : Tuple = jax.random.split(snake_case , jax.device_count() )
for epoch in range(args.max_epochs ):
A__ : Optional[int] = jnp.array(0 , dtype=jnp.floataa )
A__ : int = get_batched_dataset(snake_case , args.batch_size , seed=snake_case )
A__ : Optional[int] = 0
for batch in tqdm(snake_case , total=snake_case , desc=F'Running EPOCH-{epoch}' ):
A__ : Optional[Any] = self.data_collator(snake_case )
A__ , A__ , A__ : List[str] = self.train_step_fn(snake_case , snake_case , **snake_case )
running_loss += jax_utils.unreplicate(metrics["""loss"""] )
i += 1
if i % args.logging_steps == 0:
A__ : Optional[Any] = jax_utils.unreplicate(state.step )
A__ : List[Any] = running_loss.item() / i
A__ : Union[str, Any] = self.scheduler_fn(state_step - 1 )
A__ : str = self.evaluate(snake_case , snake_case )
A__ : List[str] = {
"""step""": state_step.item(),
"""eval_loss""": eval_loss.item(),
"""tr_loss""": tr_loss,
"""lr""": lr.item(),
}
tqdm.write(str(snake_case ) )
self.logger.log(snake_case , commit=snake_case )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F'-e{epoch}-s{i}' , state=snake_case )
def _UpperCamelCase ( self : Union[str, Any] , snake_case : List[Any] , snake_case : Optional[Any] ):
'''simple docstring'''
A__ : Union[str, Any] = get_batched_dataset(snake_case , self.args.batch_size )
A__ : str = len(snake_case ) // self.args.batch_size
A__ : Optional[int] = jnp.array(0 , dtype=jnp.floataa )
A__ : Optional[int] = 0
for batch in tqdm(snake_case , total=snake_case , desc="""Evaluating ... """ ):
A__ : Any = self.data_collator(snake_case )
A__ : Dict = self.val_step_fn(snake_case , **snake_case )
running_loss += jax_utils.unreplicate(metrics["""loss"""] )
i += 1
return running_loss / i
def _UpperCamelCase ( self : List[Any] , snake_case : Any , snake_case : str ):
'''simple docstring'''
A__ : int = jax_utils.unreplicate(snake_case )
print(F'SAVING CHECKPOINT IN {save_dir}' , end=""" ... """ )
self.model_save_fn(snake_case , params=state.params )
with open(os.path.join(snake_case , """opt_state.msgpack""" ) , """wb""" ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(snake_case , """args.joblib""" ) )
joblib.dump(self.data_collator , os.path.join(snake_case , """data_collator.joblib""" ) )
with open(os.path.join(snake_case , """training_state.json""" ) , """w""" ) as f:
json.dump({"""step""": state.step.item()} , snake_case )
print("""DONE""" )
def _lowerCAmelCase ( UpperCAmelCase__ : List[str], UpperCAmelCase__ : Dict ) ->List[Any]:
print(f'RESTORING CHECKPOINT FROM {save_dir}', end=""" ... """ )
with open(os.path.join(UpperCAmelCase__, """flax_model.msgpack""" ), """rb""" ) as f:
A__ : Optional[Any] = from_bytes(state.params, f.read() )
with open(os.path.join(UpperCAmelCase__, """opt_state.msgpack""" ), """rb""" ) as f:
A__ : Optional[Any] = from_bytes(state.opt_state, f.read() )
A__ : Optional[int] = joblib.load(os.path.join(UpperCAmelCase__, """args.joblib""" ) )
A__ : Union[str, Any] = joblib.load(os.path.join(UpperCAmelCase__, """data_collator.joblib""" ) )
with open(os.path.join(UpperCAmelCase__, """training_state.json""" ), """r""" ) as f:
A__ : Optional[Any] = json.load(UpperCAmelCase__ )
A__ : Union[str, Any] = training_state["""step"""]
print("""DONE""" )
return params, opt_state, step, args, data_collator
def _lowerCAmelCase ( UpperCAmelCase__ : str, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Any, UpperCAmelCase__ : List[str] ) ->Optional[Any]:
A__ : List[Any] = num_train_steps - warmup_steps
A__ : Tuple = optax.linear_schedule(init_value=UpperCAmelCase__, end_value=UpperCAmelCase__, transition_steps=UpperCAmelCase__ )
A__ : Union[str, Any] = optax.linear_schedule(init_value=UpperCAmelCase__, end_value=1e-7, transition_steps=UpperCAmelCase__ )
A__ : List[str] = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps] )
return lr
def _lowerCAmelCase ( UpperCAmelCase__ : List[str], UpperCAmelCase__ : Tuple, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : List[str] ) ->Optional[Any]:
def weight_decay_mask(UpperCAmelCase__ : List[str] ):
A__ : Union[str, Any] = traverse_util.flatten_dict(UpperCAmelCase__ )
A__ : Dict = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()}
return traverse_util.unflatten_dict(UpperCAmelCase__ )
A__ : int = scheduler_fn(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
A__ : str = optax.adamw(learning_rate=UpperCAmelCase__, weight_decay=UpperCAmelCase__, mask=UpperCAmelCase__ )
return tx, lr
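# A self-contained sketch of the learning-rate schedule assembled above: linear
# warmup from init_lr up to lr, then linear decay towards ~0, joined at the
# warmup boundary. The step counts and rates below are illustrative, not the
# script's defaults.
import optax

warmup_steps, num_train_steps = 100, 1_000
warmup_fn = optax.linear_schedule(init_value=0.0, end_value=3e-5, transition_steps=warmup_steps)
decay_fn = optax.linear_schedule(init_value=3e-5, end_value=1e-7, transition_steps=num_train_steps - warmup_steps)
schedule = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
assert float(schedule(0)) == 0.0 and abs(float(schedule(warmup_steps)) - 3e-5) < 1e-9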
| 498 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
snake_case_ = 'roc_bert'
def __init__( self : Tuple , snake_case : Union[str, Any]=3_0522 , snake_case : Optional[Any]=768 , snake_case : Any=12 , snake_case : Tuple=12 , snake_case : Any=3072 , snake_case : Tuple="gelu" , snake_case : Optional[int]=0.1 , snake_case : List[str]=0.1 , snake_case : List[str]=512 , snake_case : int=2 , snake_case : Optional[Any]=0.02 , snake_case : str=1e-12 , snake_case : int=True , snake_case : Optional[Any]=0 , snake_case : str="absolute" , snake_case : int=None , snake_case : int=True , snake_case : Optional[int]=True , snake_case : List[Any]=768 , snake_case : Dict=910 , snake_case : Tuple=512 , snake_case : Tuple=2_4858 , snake_case : Optional[Any]=True , **snake_case : Any , ):
'''simple docstring'''
A__ : Optional[Any] = vocab_size
A__ : Union[str, Any] = max_position_embeddings
A__ : Any = hidden_size
A__ : Optional[int] = num_hidden_layers
A__ : Optional[Any] = num_attention_heads
A__ : int = intermediate_size
A__ : Optional[Any] = hidden_act
A__ : Any = hidden_dropout_prob
A__ : Tuple = attention_probs_dropout_prob
A__ : Union[str, Any] = initializer_range
A__ : str = type_vocab_size
A__ : Any = layer_norm_eps
A__ : List[str] = use_cache
A__ : List[str] = enable_pronunciation
A__ : int = enable_shape
A__ : Tuple = pronunciation_embed_dim
A__ : str = pronunciation_vocab_size
A__ : Optional[Any] = shape_embed_dim
A__ : str = shape_vocab_size
A__ : List[str] = concat_input
A__ : str = position_embedding_type
A__ : str = classifier_dropout
super().__init__(pad_token_id=snake_case , **snake_case )
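# A short offline usage sketch for the configuration above; no weights are
# downloaded, only the config object is built. It assumes a transformers release
# that ships RoCBert (the upstream class name is RoCBertConfig); the asserted
# values are the defaults from the signature above.
from transformers import RoCBertConfig

cfg = RoCBertConfig(hidden_size=768, num_hidden_layers=12)
assert cfg.concat_input and cfg.pronunciation_embed_dim == 768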
| 498 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__=False ) -> Any:
lowerCAmelCase__ : Tuple = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
        # for the bare backbone, strip the leading "vit." prefix (4 characters) from every key that starts with "vit"
lowerCAmelCase__ : Optional[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__=False ) -> Optional[Any]:
for i in range(config.num_hidden_layers ):
if base_model:
lowerCAmelCase__ : Optional[Any] = ""
else:
lowerCAmelCase__ : Dict = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase__ : Optional[Any] = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
lowerCAmelCase__ : Optional[Any] = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase__ : Optional[int] = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase__ : Dict = in_proj_bias[: config.hidden_size]
lowerCAmelCase__ : Union[str, Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase__ : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase__ : Optional[int] = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase__ : Optional[Any] = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE ( lowercase__ ) -> Dict:
lowerCAmelCase__ : Dict = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ ) -> Dict:
lowerCAmelCase__ : Optional[int] = dct.pop(lowerCAmelCase__ )
lowerCAmelCase__ : List[str] = val
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
lowerCAmelCase__ : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCAmelCase__ : List[str] = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ ) -> Dict:
lowerCAmelCase__ : List[Any] = ViTConfig()
lowerCAmelCase__ : Optional[int] = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
lowerCAmelCase__ : Optional[Any] = True
lowerCAmelCase__ : Dict = int(vit_name[-1_2:-1_0] )
lowerCAmelCase__ : Optional[Any] = int(vit_name[-9:-6] )
else:
lowerCAmelCase__ : str = 1_0_0_0
lowerCAmelCase__ : Optional[Any] = "huggingface/label-files"
lowerCAmelCase__ : Optional[int] = "imagenet-1k-id2label.json"
lowerCAmelCase__ : Optional[Any] = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type="dataset" ) , "r" ) )
lowerCAmelCase__ : Any = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
lowerCAmelCase__ : List[Any] = idalabel
lowerCAmelCase__ : Dict = {v: k for k, v in idalabel.items()}
lowerCAmelCase__ : Union[str, Any] = int(vit_name[-6:-4] )
lowerCAmelCase__ : Any = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("tiny" ):
lowerCAmelCase__ : List[str] = 1_9_2
lowerCAmelCase__ : List[str] = 7_6_8
lowerCAmelCase__ : str = 1_2
lowerCAmelCase__ : List[Any] = 3
elif vit_name[9:].startswith("small" ):
lowerCAmelCase__ : Optional[Any] = 3_8_4
lowerCAmelCase__ : Dict = 1_5_3_6
lowerCAmelCase__ : Optional[int] = 1_2
lowerCAmelCase__ : List[Any] = 6
else:
pass
else:
if vit_name[4:].startswith("small" ):
lowerCAmelCase__ : Optional[int] = 7_6_8
lowerCAmelCase__ : Union[str, Any] = 2_3_0_4
lowerCAmelCase__ : int = 8
lowerCAmelCase__ : Any = 8
elif vit_name[4:].startswith("base" ):
pass
elif vit_name[4:].startswith("large" ):
lowerCAmelCase__ : List[Any] = 1_0_2_4
lowerCAmelCase__ : Dict = 4_0_9_6
lowerCAmelCase__ : str = 2_4
lowerCAmelCase__ : Union[str, Any] = 1_6
elif vit_name[4:].startswith("huge" ):
lowerCAmelCase__ : int = 1_2_8_0
lowerCAmelCase__ : str = 5_1_2_0
lowerCAmelCase__ : List[Any] = 3_2
lowerCAmelCase__ : Union[str, Any] = 1_6
# load original model from timm
lowerCAmelCase__ : Optional[Any] = timm.create_model(lowerCAmelCase__ , pretrained=lowerCAmelCase__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCAmelCase__ : Tuple = timm_model.state_dict()
if base_model:
remove_classification_head_(lowerCAmelCase__ )
lowerCAmelCase__ : List[str] = create_rename_keys(lowerCAmelCase__ , lowerCAmelCase__ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
read_in_q_k_v(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowerCAmelCase__ : Optional[Any] = ViTModel(lowerCAmelCase__ ).eval()
else:
lowerCAmelCase__ : str = ViTForImageClassification(lowerCAmelCase__ ).eval()
model.load_state_dict(lowerCAmelCase__ )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
lowerCAmelCase__ : str = DeiTImageProcessor(size=config.image_size )
else:
lowerCAmelCase__ : Optional[Any] = ViTImageProcessor(size=config.image_size )
lowerCAmelCase__ : Tuple = image_processor(images=prepare_img() , return_tensors="pt" )
lowerCAmelCase__ : int = encoding["pixel_values"]
lowerCAmelCase__ : List[Any] = model(lowerCAmelCase__ )
if base_model:
lowerCAmelCase__ : List[str] = timm_model.forward_features(lowerCAmelCase__ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(lowerCAmelCase__ , outputs.pooler_output , atol=1E-3 )
else:
lowerCAmelCase__ : List[str] = timm_model(lowerCAmelCase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCAmelCase__ , outputs.logits , atol=1E-3 )
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
        help="""Name of the ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCamelCase = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
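# The q/k/v split performed in read_in_q_k_v above, isolated: timm stores the
# attention input projection as one fused (3 * hidden, hidden) matrix, while the
# HF ViT checkpoint keeps separate query/key/value tensors. Sizes are illustrative.
import torch

hidden_size = 8
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused qkv, timm layout
query = in_proj_weight[:hidden_size, :]
key = in_proj_weight[hidden_size : hidden_size * 2, :]
value = in_proj_weight[-hidden_size:, :]
assert torch.equal(torch.cat([query, key, value]), in_proj_weight)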
| 453 |
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
SCREAMING_SNAKE_CASE : Optional[int] = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
SCREAMING_SNAKE_CASE : Optional[Any] = []
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Optional[int] = {'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}}
SCREAMING_SNAKE_CASE : List[Any] = [
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': f'''🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results''',
'''emoji''': True,
},
}
]
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for log in Path().glob('''*.log'''):
SCREAMING_SNAKE_CASE : Optional[Any] = 0
with open(log, '''r''') as f:
for line in f:
SCREAMING_SNAKE_CASE : str = json.loads(line)
if line.get('''nodeid''', '''''') != "":
SCREAMING_SNAKE_CASE : Optional[Any] = line['''nodeid''']
if line.get('''duration''', None) is not None:
SCREAMING_SNAKE_CASE : str = f'''{line["duration"]:.4f}'''
if line.get('''outcome''', '''''') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('''_''')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
SCREAMING_SNAKE_CASE : Dict = []
log.unlink()
SCREAMING_SNAKE_CASE : Optional[Any] = ''''''
SCREAMING_SNAKE_CASE : str = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : Dict = {}
for test in failed_tests:
SCREAMING_SNAKE_CASE : Optional[Any] = test[0].split('''::''')
SCREAMING_SNAKE_CASE : Optional[int] = data[0].split('''/''')[-1]
if data[0] not in filesafailed:
SCREAMING_SNAKE_CASE : Union[str, Any] = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
SCREAMING_SNAKE_CASE : Union[str, Any] = [test[0] for test in failed_table]
SCREAMING_SNAKE_CASE : Union[str, Any] = list(set(files))
# Count number of instances in failed_tests
SCREAMING_SNAKE_CASE : Any = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
SCREAMING_SNAKE_CASE : int = tabulate(
table,
headers=['''Test Location''', '''Num Failed'''],
tablefmt=hf_table_format,
stralign='''right''',
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_000:
SCREAMING_SNAKE_CASE : str = '''Too many failed tests, please see the full report in the Action results.'''
SCREAMING_SNAKE_CASE : List[Any] = len(err) + 10
SCREAMING_SNAKE_CASE : Dict = message[: 3_000 - offset] + f'''\n...\n```\n{err}'''
print(f'''### {message}''')
else:
SCREAMING_SNAKE_CASE : Any = '''No failed tests! 🤗'''
print(f'''## {message}''')
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
SCREAMING_SNAKE_CASE : str = WebClient(token=os.environ['''SLACK_API_TOKEN'''])
if message != "No failed tests! 🤗":
SCREAMING_SNAKE_CASE : Optional[int] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
SCREAMING_SNAKE_CASE : str = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': f'''https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
payload.append(action_button)
SCREAMING_SNAKE_CASE : List[str] = {
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': f'''Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}''',
}
],
}
payload.append(date_report)
SCREAMING_SNAKE_CASE : Optional[Any] = client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload)
SCREAMING_SNAKE_CASE : Tuple = response.data['''ts''']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
SCREAMING_SNAKE_CASE : str = ''''''
for i, row in enumerate(test_failures):
if row[0] != test_class:
SCREAMING_SNAKE_CASE : List[str] = row[0]
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = ''''''
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': f'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```''',
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
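# A self-contained sketch of how the custom TableFormat defined at the top of
# this script renders: pipe-delimited header and data rows with no rule lines,
# which Slack displays cleanly inside a code block. The sample row is made up.
from tabulate import DataRow, TableFormat, tabulate

fmt = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)
print(tabulate([["tests/test_a.py", 2]], headers=["Test Location", "Num Failed"], tablefmt=fmt, stralign="right"))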
| 260 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase : List[Any] = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {
"shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class A( UpperCamelCase , UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = '''nat'''
UpperCamelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : Dict , A_ : Dict=4 , A_ : int=3 , A_ : Any=64 , A_ : Any=[3, 4, 6, 5] , A_ : Any=[2, 4, 8, 16] , A_ : Optional[Any]=7 , A_ : List[Any]=3.0 , A_ : Tuple=True , A_ : int=0.0 , A_ : int=0.0 , A_ : Optional[Any]=0.1 , A_ : str="gelu" , A_ : Tuple=0.02 , A_ : Tuple=1E-5 , A_ : List[Any]=0.0 , A_ : Any=None , A_ : Any=None , **A_ : Tuple , ) -> Tuple:
"""simple docstring"""
super().__init__(**A_ )
lowerCamelCase_ = patch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = embed_dim
lowerCamelCase_ = depths
lowerCamelCase_ = len(A_ )
lowerCamelCase_ = num_heads
lowerCamelCase_ = kernel_size
lowerCamelCase_ = mlp_ratio
lowerCamelCase_ = qkv_bias
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = drop_path_rate
lowerCamelCase_ = hidden_act
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCamelCase_ = int(embed_dim * 2 ** (len(A_ ) - 1) )
lowerCamelCase_ = layer_scale_init_value
lowerCamelCase_ = ['stem'] + [f"""stage{idx}""" for idx in range(1 , len(A_ ) + 1 )]
lowerCamelCase_ , lowerCamelCase_ = get_aligned_output_features_output_indices(
out_features=A_ , out_indices=A_ , stage_names=self.stage_names )
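# A quick check of the hidden-size rule encoded above: every stage doubles the
# channel count, so the final dimension is embed_dim * 2 ** (num_stages - 1).
# The numbers mirror this config's defaults (embed_dim=64, four stages).
embed_dim, depths = 64, [3, 4, 6, 5]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
assert hidden_size == 512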
| 721 |
from __future__ import annotations
from fractions import Fraction
def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : int ):
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def _SCREAMING_SNAKE_CASE ( lowercase : int ):
'''simple docstring'''
lowerCamelCase_ = []
lowerCamelCase_ = 11
lowerCamelCase_ = int('1' + '0' * digit_len )
for num in range(lowercase , lowercase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(lowercase , lowercase ):
solutions.append(f"""{num}/{den}""" )
den += 1
num += 1
lowerCamelCase_ = 10
return solutions
def _SCREAMING_SNAKE_CASE ( lowercase : int = 2 ):
'''simple docstring'''
lowerCamelCase_ = 1.0
for fraction in fraction_list(lowercase ):
lowerCamelCase_ = Fraction(lowercase )
result *= frac.denominator / frac.numerator
return int(lowercase )
if __name__ == "__main__":
print(solution())
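# A worked instance of the property tested by is_digit_cancelling above: 49/98
# equals 4/8 after naively striking the shared digit 9. The four non-trivial
# two-digit cases are 16/64, 19/95, 26/65 and 49/98.
num, den = 49, 98
assert num % 10 == den // 10                      # shared digit: 9
assert (num // 10) / (den % 10) == num / den      # 4/8 == 49/98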
| 651 | 0 |
from pathlib import Path
import fire
def lowercase__ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = Path(_UpperCamelCase)
UpperCamelCase = Path(_UpperCamelCase)
dest_dir.mkdir(exist_ok=_UpperCamelCase)
for path in src_dir.iterdir():
UpperCamelCase = [x.rstrip() for x in list(path.open().readlines())][:n]
UpperCamelCase = dest_dir.joinpath(path.name)
print(_UpperCamelCase)
dest_path.open('w').write('\n'.join(_UpperCamelCase))
if __name__ == "__main__":
fire.Fire(minify)
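# A self-contained re-sketch of the truncation helper above (registered with
# fire under the upstream name `minify`), exercised on throwaway directories so
# it can run anywhere without touching real data.
import tempfile

def keep_first_n_lines(src_dir, dest_dir, n):
    dest = Path(dest_dir)
    dest.mkdir(exist_ok=True)
    for path in Path(src_dir).iterdir():
        lines = [x.rstrip() for x in path.open().readlines()][:n]
        dest.joinpath(path.name).open("w").write("\n".join(lines))

with tempfile.TemporaryDirectory() as src, tempfile.TemporaryDirectory() as dst:
    Path(src, "sample.txt").write_text("\n".join(f"line {i}" for i in range(10)))
    keep_first_n_lines(src, dst, 3)
    assert Path(dst, "sample.txt").read_text() == "line 0\nline 1\nline 2"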
| 280 |
import torch
from torch import nn
class A__ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int]=1 , _SCREAMING_SNAKE_CASE : List[str]=False ):
"""simple docstring"""
super().__init__()
UpperCamelCase = n_token
UpperCamelCase = d_embed
UpperCamelCase = d_proj
UpperCamelCase = cutoffs + [n_token]
UpperCamelCase = [0] + self.cutoffs
UpperCamelCase = div_val
UpperCamelCase = self.cutoffs[0]
UpperCamelCase = len(self.cutoffs ) - 1
UpperCamelCase = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
UpperCamelCase = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
UpperCamelCase = nn.Parameter(torch.zeros(self.n_clusters ) )
UpperCamelCase = nn.ModuleList()
UpperCamelCase = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) )
else:
self.out_projs.append(_SCREAMING_SNAKE_CASE )
self.out_layers.append(nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
else:
for i in range(len(self.cutoffs ) ):
UpperCamelCase , UpperCamelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCamelCase = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) )
self.out_layers.append(nn.Linear(_SCREAMING_SNAKE_CASE , r_idx - l_idx ) )
UpperCamelCase = keep_order
def _SCREAMING_SNAKE_CASE ( self : Dict , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
if proj is None:
UpperCamelCase = nn.functional.linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
UpperCamelCase = nn.functional.linear(_SCREAMING_SNAKE_CASE , proj.t().contiguous() )
UpperCamelCase = nn.functional.linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any]=None , _SCREAMING_SNAKE_CASE : Dict=False ):
"""simple docstring"""
if labels is not None:
# Shift so that tokens < n predict n
UpperCamelCase = hidden[..., :-1, :].contiguous()
UpperCamelCase = labels[..., 1:].contiguous()
UpperCamelCase = hidden.view(-1 , hidden.size(-1 ) )
UpperCamelCase = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
else:
UpperCamelCase = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
UpperCamelCase = self._compute_logit(_SCREAMING_SNAKE_CASE , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
UpperCamelCase = labels != -100
UpperCamelCase = torch.zeros_like(_SCREAMING_SNAKE_CASE , dtype=hidden.dtype , device=hidden.device )
UpperCamelCase = (
-nn.functional.log_softmax(_SCREAMING_SNAKE_CASE , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
UpperCamelCase = nn.functional.log_softmax(_SCREAMING_SNAKE_CASE , dim=-1 )
else:
# construct weights and biases
UpperCamelCase , UpperCamelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
UpperCamelCase , UpperCamelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCamelCase = self.out_layers[0].weight[l_idx:r_idx]
UpperCamelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
UpperCamelCase = self.out_layers[i].weight
UpperCamelCase = self.out_layers[i].bias
if i == 0:
UpperCamelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
UpperCamelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(_SCREAMING_SNAKE_CASE )
biases.append(_SCREAMING_SNAKE_CASE )
UpperCamelCase , UpperCamelCase , UpperCamelCase = weights[0], biases[0], self.out_projs[0]
UpperCamelCase = self._compute_logit(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = nn.functional.log_softmax(_SCREAMING_SNAKE_CASE , dim=1 )
if labels is None:
UpperCamelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
UpperCamelCase = torch.zeros_like(_SCREAMING_SNAKE_CASE , dtype=hidden.dtype , device=hidden.device )
UpperCamelCase = 0
UpperCamelCase = [0] + self.cutoffs
for i in range(len(_SCREAMING_SNAKE_CASE ) - 1 ):
UpperCamelCase , UpperCamelCase = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
UpperCamelCase = (labels >= l_idx) & (labels < r_idx)
UpperCamelCase = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
UpperCamelCase = labels.index_select(0 , _SCREAMING_SNAKE_CASE ) - l_idx
UpperCamelCase = head_logprob.index_select(0 , _SCREAMING_SNAKE_CASE )
UpperCamelCase = hidden.index_select(0 , _SCREAMING_SNAKE_CASE )
else:
UpperCamelCase = hidden
if i == 0:
if labels is not None:
UpperCamelCase = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
UpperCamelCase = head_logprob[:, : self.cutoffs[0]]
else:
UpperCamelCase , UpperCamelCase , UpperCamelCase = weights[i], biases[i], self.out_projs[i]
UpperCamelCase = self._compute_logit(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = nn.functional.log_softmax(_SCREAMING_SNAKE_CASE , dim=1 )
UpperCamelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
UpperCamelCase = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
UpperCamelCase = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
UpperCamelCase = logprob_i
if labels is not None:
if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
out.index_copy_(0 , _SCREAMING_SNAKE_CASE , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def _SCREAMING_SNAKE_CASE ( self : int , _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
if self.n_clusters == 0:
UpperCamelCase = self._compute_logit(_SCREAMING_SNAKE_CASE , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(_SCREAMING_SNAKE_CASE , dim=-1 )
else:
# construct weights and biases
UpperCamelCase , UpperCamelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
UpperCamelCase , UpperCamelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCamelCase = self.out_layers[0].weight[l_idx:r_idx]
UpperCamelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
UpperCamelCase = self.out_layers[i].weight
UpperCamelCase = self.out_layers[i].bias
if i == 0:
UpperCamelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
UpperCamelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(_SCREAMING_SNAKE_CASE )
biases.append(_SCREAMING_SNAKE_CASE )
UpperCamelCase , UpperCamelCase , UpperCamelCase = weights[0], biases[0], self.out_projs[0]
UpperCamelCase = self._compute_logit(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
UpperCamelCase = nn.functional.log_softmax(_SCREAMING_SNAKE_CASE , dim=1 )
UpperCamelCase = [0] + self.cutoffs
for i in range(len(_SCREAMING_SNAKE_CASE ) - 1 ):
UpperCamelCase , UpperCamelCase = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
UpperCamelCase = head_logprob[:, : self.cutoffs[0]]
else:
UpperCamelCase , UpperCamelCase , UpperCamelCase = weights[i], biases[i], self.out_projs[i]
UpperCamelCase = self._compute_logit(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = nn.functional.log_softmax(_SCREAMING_SNAKE_CASE , dim=1 )
UpperCamelCase = head_logprob[:, -i] + tail_logprob_i
UpperCamelCase = logprob_i
return out
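# The cutoff bookkeeping used throughout the class above, in isolation: the
# cutoffs split the vocabulary into a frequent head plus tail clusters, and the
# head output additionally carries one logit per tail cluster. Numbers are
# illustrative, not real vocabulary sizes.
n_token, cutoffs = 10, [4, 7]
cutoff_ends = [0] + cutoffs + [n_token]   # token-id boundaries: [0, 4, 7, 10]
n_clusters = len(cutoff_ends) - 2         # two tail clusters
head_size = cutoffs[0] + n_clusters       # 4 head tokens + 2 cluster logits
assert (cutoff_ends, n_clusters, head_size) == ([0, 4, 7, 10], 2, 6)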
| 280 | 1 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__snake_case =logging.get_logger(__name__)
__snake_case ={"""vocab_file""": """spiece.model"""}
__snake_case ={
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
}
}
__snake_case ={
"""google/bigbird-roberta-base""": 4_096,
"""google/bigbird-roberta-large""": 4_096,
"""google/bigbird-base-trivia-itc""": 4_096,
}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Optional[Any] = VOCAB_FILES_NAMES
lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : List[Any] = ['''input_ids''', '''attention_mask''']
lowerCamelCase : List[int] = []
def __init__( self : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : Any="<unk>" , UpperCAmelCase__ : Union[str, Any]="<s>" , UpperCAmelCase__ : str="</s>" , UpperCAmelCase__ : List[Any]="<pad>" , UpperCAmelCase__ : Any="[SEP]" , UpperCAmelCase__ : List[Any]="[MASK]" , UpperCAmelCase__ : Optional[int]="[CLS]" , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : Tuple , ) -> None:
lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else bos_token
lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else eos_token
lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else unk_token
lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else pad_token
lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else cls_token
lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else sep_token
        # Mask token behaves like a normal word, i.e. includes the space before it
lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , )
lowerCAmelCase = vocab_file
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase__ )
@property
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
return self.sp_model.get_piece_size()
def __UpperCAmelCase ( self : Dict ) -> Tuple:
lowerCAmelCase = {self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ) -> Optional[int]:
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self : int , UpperCAmelCase__ : List[Any] ) -> Dict:
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : str ) -> List[str]:
return self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ )
def __UpperCAmelCase ( self : Tuple , UpperCAmelCase__ : Optional[int] ) -> Dict:
return self.sp_model.piece_to_id(UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : int ) -> Optional[int]:
lowerCAmelCase = self.sp_model.IdToPiece(UpperCAmelCase__ )
return token
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : List[str] ) -> Any:
lowerCAmelCase = []
lowerCAmelCase = ''
lowerCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCAmelCase__ ) + token
lowerCAmelCase = True
lowerCAmelCase = []
else:
current_sub_tokens.append(UpperCAmelCase__ )
lowerCAmelCase = False
out_string += self.sp_model.decode(UpperCAmelCase__ )
return out_string.strip()
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : bool = True , **UpperCAmelCase__ : int , ) -> str:
lowerCAmelCase = kwargs.pop('use_source_tokenizer' , UpperCAmelCase__ )
lowerCAmelCase = self.convert_ids_to_tokens(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowerCAmelCase = []
lowerCAmelCase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCAmelCase__ ) )
lowerCAmelCase = []
sub_texts.append(UpperCAmelCase__ )
else:
current_sub_text.append(UpperCAmelCase__ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCAmelCase__ ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
lowerCAmelCase = re.sub(R' (\[(MASK|SEP)\])' , R'\1' , ' '.join(UpperCAmelCase__ ) )
else:
lowerCAmelCase = ''.join(UpperCAmelCase__ )
lowerCAmelCase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowerCAmelCase = self.clean_up_tokenization(UpperCAmelCase__ )
return clean_text
else:
return text
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase = os.path.join(
UpperCAmelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase__ , 'wb' ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__ )
return (out_vocab_file,)
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
lowerCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase__ )) + [1]
return [1] + ([0] * len(UpperCAmelCase__ )) + [1] + ([0] * len(UpperCAmelCase__ )) + [1]
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
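# The special-token layout implemented by the sequence-building method above
# (upstream: build_inputs_with_special_tokens), exercised with dummy ids; 65 and
# 66 stand in for [CLS] and [SEP], whose real values come from the checkpoint's
# sentencepiece vocab.
def with_special_tokens(ids_a, ids_b=None, cls_id=65, sep_id=66):
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]

assert with_special_tokens([10, 11]) == [65, 10, 11, 66]
assert with_special_tokens([10, 11], [20, 21]) == [65, 10, 11, 66, 20, 21, 66]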
| 701 |
'''simple docstring'''
class UpperCAmelCase_ :
def __init__( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Union[str, Any]=None ) -> str:
lowerCAmelCase = data
lowerCAmelCase = previous
lowerCAmelCase = next_node
def __str__( self : int ) -> str:
return F'''{self.data}'''
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
return self.data
def __UpperCAmelCase ( self : List[str] ) -> Dict:
return self.next
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
return self.previous
class UpperCAmelCase_ :
def __init__( self : Tuple , UpperCAmelCase__ : List[Any] ) -> List[str]:
lowerCAmelCase = head
def __iter__( self : Tuple ) -> Dict:
return self
    def __next__( self : Any ) -> List[Any]:  # the dunder name is required for the iterator protocol
if not self.current:
raise StopIteration
else:
lowerCAmelCase = self.current.get_data()
lowerCAmelCase = self.current.get_next()
return value
class UpperCAmelCase_ :
def __init__( self : Optional[Any] ) -> Optional[int]:
lowerCAmelCase = None # First node in list
lowerCAmelCase = None # Last node in list
def __str__( self : List[Any] ) -> Dict:
lowerCAmelCase = self.head
lowerCAmelCase = []
while current is not None:
nodes.append(current.get_data() )
lowerCAmelCase = current.get_next()
return " ".join(str(UpperCAmelCase__ ) for node in nodes )
def __contains__( self : List[str] , UpperCAmelCase__ : int ) -> Optional[Any]:
lowerCAmelCase = self.head
while current:
if current.get_data() == value:
return True
lowerCAmelCase = current.get_next()
return False
def __iter__( self : int ) -> Any:
return LinkedListIterator(self.head )
def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
if self.head:
return self.head.get_data()
return None
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
if self.tail:
return self.tail.get_data()
return None
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : Node ) -> None:
if self.head is None:
lowerCAmelCase = node
lowerCAmelCase = node
else:
self.insert_before_node(self.head , UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : Node ) -> None:
if self.head is None:
self.set_head(UpperCAmelCase__ )
else:
self.insert_after_node(self.tail , UpperCAmelCase__ )
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : int ) -> None:
lowerCAmelCase = Node(UpperCAmelCase__ )
if self.head is None:
self.set_head(UpperCAmelCase__ )
else:
self.set_tail(UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : Node , UpperCAmelCase__ : Node ) -> None:
lowerCAmelCase = node
lowerCAmelCase = node.previous
if node.get_previous() is None:
lowerCAmelCase = node_to_insert
else:
lowerCAmelCase = node_to_insert
lowerCAmelCase = node_to_insert
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : Node , UpperCAmelCase__ : Node ) -> None:
lowerCAmelCase = node
lowerCAmelCase = node.next
if node.get_next() is None:
lowerCAmelCase = node_to_insert
else:
lowerCAmelCase = node_to_insert
lowerCAmelCase = node_to_insert
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> None:
lowerCAmelCase = 1
lowerCAmelCase = Node(UpperCAmelCase__ )
lowerCAmelCase = self.head
while node:
if current_position == position:
self.insert_before_node(UpperCAmelCase__ , UpperCAmelCase__ )
return
current_position += 1
lowerCAmelCase = node.next
self.insert_after_node(self.tail , UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : int ) -> Node:
lowerCAmelCase = self.head
while node:
if node.get_data() == item:
return node
lowerCAmelCase = node.get_next()
raise Exception('Node not found' )
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : int ) -> Dict:
        # get_node raises if the value is missing, so the walrus guard below only serves
        # the "found" path; absent values propagate the exception to the caller
        if (node := self.get_node(UpperCAmelCase__ )) is not None:
if node == self.head:
lowerCAmelCase = self.head.get_next()
if node == self.tail:
lowerCAmelCase = self.tail.get_previous()
self.remove_node_pointers(UpperCAmelCase__ )
@staticmethod
def __UpperCAmelCase ( UpperCAmelCase__ : Node ) -> None:
if node.get_next():
lowerCAmelCase = node.previous
if node.get_previous():
lowerCAmelCase = node.next
lowerCAmelCase = None
lowerCAmelCase = None
def __UpperCAmelCase ( self : Any ) -> List[str]:
return self.head is None
def a_ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
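# The pointer rewiring behind "insert before" in the list above, reduced to a
# bare node class so it runs standalone; upstream the method is
# insert_before_node(node, node_to_insert).
class _Node:
    def __init__(self, data):
        self.data, self.previous, self.next = data, None, None

def insert_before(node, new):
    new.next = node
    new.previous = node.previous
    if node.previous is not None:
        node.previous.next = new
    node.previous = new

a, b, c = _Node(1), _Node(3), _Node(2)
a.next, b.previous = b, a
insert_before(b, c)  # splice 2 between 1 and 3
assert (a.next.data, b.previous.data, c.previous.data) == (2, 2, 1)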
| 513 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
UpperCAmelCase__ = DanceDiffusionPipeline
UpperCAmelCase__ = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
UpperCAmelCase__ = PipelineTesterMixin.required_optional_params - {
"callback",
"latents",
"callback_steps",
"output_type",
"num_images_per_prompt",
}
UpperCAmelCase__ = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[Any]:
torch.manual_seed(0 )
__magic_name__: Dict = UNetaDModel(
block_out_channels=(3_2, 3_2, 6_4) , extra_in_channels=1_6 , sample_size=5_1_2 , sample_rate=1_6_0_0_0 , in_channels=2 , out_channels=2 , flip_sin_to_cos=__snake_case , use_timestep_embedding=__snake_case , time_embedding_type="""fourier""" , mid_block_type="""UNetMidBlock1D""" , down_block_types=("""DownBlock1DNoSkip""", """DownBlock1D""", """AttnDownBlock1D""") , up_block_types=("""AttnUpBlock1D""", """UpBlock1D""", """UpBlock1DNoSkip""") , )
__magic_name__: Tuple = IPNDMScheduler()
__magic_name__: Union[str, Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
}
return components
def lowerCamelCase__ ( self : Optional[int] , __snake_case : Optional[Any] , __snake_case : List[str]=0 ) -> str:
if str(__snake_case ).startswith("""mps""" ):
__magic_name__: List[str] = torch.manual_seed(__snake_case )
else:
__magic_name__: Union[str, Any] = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
__magic_name__: int = {
"""batch_size""": 1,
"""generator""": generator,
"""num_inference_steps""": 4,
}
return inputs
    def test_dance_diffusion( self ) -> Optional[int]:
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = pipe(**inputs )
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
    @skip_mps
    def test_save_load_local( self ) -> Optional[int]:
        return super().test_save_load_local()
    @skip_mps
    def test_dict_tuple_outputs_equivalent( self ) -> Union[str, Any]:
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
    @skip_mps
    def test_save_load_optional_components( self ) -> List[str]:
        return super().test_save_load_optional_components()
    @skip_mps
    def test_attention_slicing_forward_pass( self ) -> Any:
        return super().test_attention_slicing_forward_pass()
    def test_inference_batch_single_identical( self ) -> Optional[Any]:
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
    def tearDown( self ) -> str:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dance_diffusion( self ) -> Optional[int]:
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        output = pipe(generator=generator , num_inference_steps=1_0_0 , audio_length_in_s=4.096 )
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
    def test_dance_diffusion_fp16( self ) -> int:
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" , torch_dtype=torch.float16 )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        output = pipe(generator=generator , num_inference_steps=1_0_0 , audio_length_in_s=4.096 )
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
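# A minimal sketch of the end-to-end usage exercised by the slow tests above
# (kept as comments because it downloads the harmonai/maestro-150k checkpoint):
#
#     pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k").to("cuda")
#     output = pipe(generator=torch.manual_seed(0), num_inference_steps=100, audio_length_in_s=4.096)
#     audio = output.audios[0]  # numpy array of shape (channels, samples)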
| 96 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : List[Any] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''',
},
}
SCREAMING_SNAKE_CASE__ : List[Any] = {
'''camembert-base''': 5_1_2,
}
SCREAMING_SNAKE_CASE__ : Any = '''▁'''
class a__( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = CamembertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"] , **kwargs , ) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
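# For reference, the special-token layouts produced by the two methods above
# (CamemBERT, like RoBERTa, uses a double separator between sequence pairs):
#
#     single sequence: <s> A </s>
#     pair of sequences: <s> A </s></s> B </s>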
| 538 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
a__ : Any = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name ):
    '''Build an ASTConfig whose strides and label set match the checkpoint name.'''
    config = ASTConfig()
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported" )
    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(name ):
    '''Map a parameter name from the original AST checkpoint to its 🤗 counterpart.'''
    if "module.v" in name:
        name = name.replace("module.v" , "audio_spectrogram_transformer" )
    if "cls_token" in name:
        name = name.replace("cls_token" , "embeddings.cls_token" )
    if "dist_token" in name:
        name = name.replace("dist_token" , "embeddings.distillation_token" )
    if "pos_embed" in name:
        name = name.replace("pos_embed" , "embeddings.position_embeddings" )
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks" , "encoder.layer" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name:
        name = name.replace("attn" , "attention.self" )
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" )
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0" , "classifier.layernorm" )
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1" , "classifier.dense" )
    return name
def convert_state_dict(orig_state_dict , config ):
    '''Rename all keys and split the fused qkv matrices into query/key/value.'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split("." )
            layer_num = int(key_split[3] )
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[f"""audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"""] = val[:dim, :]
                orig_state_dict[f"""audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"""] = val[dim : dim * 2, :]
                orig_state_dict[f"""audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"""] = val[-dim:, :]
            else:
                orig_state_dict[f"""audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"""] = val[:dim]
                orig_state_dict[f"""audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"""] = val[dim : dim * 2]
                orig_state_dict[f"""audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"""] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def remove_keys(state_dict ):
    '''Drop the original classification-head keys that have no 🤗 equivalent.'''
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
@torch.no_grad()
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = get_audio_spectrogram_transformer_config(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = {
"ast-finetuned-audioset-10-10-0.4593": (
"https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.450": (
"https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448": (
"https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448-v2": (
"https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
),
"ast-finetuned-audioset-12-12-0.447": (
"https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
),
"ast-finetuned-audioset-14-14-0.443": (
"https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
),
"ast-finetuned-audioset-16-16-0.442": (
"https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
),
"ast-finetuned-speech-commands-v2": (
"https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
),
}
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )
    # remove some keys
    remove_keys(state_dict )
    # rename some keys
    new_state_dict = convert_state_dict(state_dict , config )
    # load 🤗 model
    model = ASTForAudioClassification(config )
    model.eval()
    model.load_state_dict(new_state_dict )
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean , std=std , max_length=max_length )
    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands" , "v0.02" , split="validation" )
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , )
        waveform , _ = torchaudio.load(filepath )
        waveform = waveform.squeeze().numpy()
    inputs = feature_extractor(waveform , sampling_rate=1_6000 , return_tensors="pt" )
    # forward pass
    outputs = model(**inputs )
    logits = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
__SCREAMING_SNAKE_CASE = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
__SCREAMING_SNAKE_CASE = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
__SCREAMING_SNAKE_CASE = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
__SCREAMING_SNAKE_CASE = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
__SCREAMING_SNAKE_CASE = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
__SCREAMING_SNAKE_CASE = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
__SCREAMING_SNAKE_CASE = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
__SCREAMING_SNAKE_CASE = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError("Unknown model name" )
if not torch.allclose(logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ):
raise ValueError("Logits don't match" )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase_ )
print(f"""Saving feature extractor to {pytorch_dump_folder_path}""" )
feature_extractor.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
print("Pushing model and feature extractor to the hub..." )
model.push_to_hub(f"""MIT/{model_name}""" )
feature_extractor.push_to_hub(f"""MIT/{model_name}""" )
if __name__ == "__main__":
a__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''ast-finetuned-audioset-10-10-0.4593''',
type=str,
help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
a__ : Union[str, Any] = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
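# Example invocation (assuming the script is saved under the filename below;
# the output directory is a hypothetical local path):
#
#     python convert_audio_spectrogram_transformer_original_to_pytorch.py \
#         --model_name ast-finetuned-audioset-10-10-0.4593 \
#         --pytorch_dump_folder_path ./ast-converted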
| 553 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False")) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
])
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
    def setUp( self ) -> Any:
        if self.framework == "pytorch":
            subprocess.run(
                F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=True , )
        assert hasattr(self , "env" )
    def create_estimator( self , instance_count ) -> Any:
        job_name = F"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=job_name , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version="py36" , )
    def save_results_as_csv( self , job_name ) -> Optional[Any]:
        TrainingJobAnalytics(job_name ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
    @parameterized.expand([(2,)] )
    def test_script( self , instance_count ) -> List[str]:
        # create estimator
        estimator = self.create_estimator(instance_count )
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 9_9_9_9_9_9 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
        assert all(t <= self.results["eval_loss"] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(F"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , outfile )
| 553 | 1 |
"""simple docstring"""
from __future__ import annotations
def kmp(pattern: str , text: str ) -> bool:
    '''Knuth-Morris-Pratt search: return True if ``pattern`` occurs in ``text``.'''
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern )
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text ):
        if pattern[j] == text[i]:
            if j == (len(pattern ) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array(pattern: str ) -> list[int]:
    '''Compute the KMP failure array: failure[j] is the length of the longest
    proper prefix of the pattern that is also a suffix of pattern[: j + 1].'''
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern ):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i )
    return failure
if __name__ == "__main__":
    # Test 1)
    pattern = '''abc1abc12'''
    text1 = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
    text2 = '''alskfjaldsk23adsfabcabc'''
    assert kmp(pattern, text1) and not kmp(pattern, text2)
    # Test 2)
    pattern = '''ABABX'''
    text = '''ABABZABABYABABX'''
    assert kmp(pattern, text)
    # Test 3)
    pattern = '''AAAB'''
    text = '''ABAAAAAB'''
    assert kmp(pattern, text)
    # Test 4)
    pattern = '''abcdabcy'''
    text = '''abcxabcdabxabcdabcdabcy'''
    assert kmp(pattern, text)
    # Test 5)
    pattern = '''aabaabaaa'''
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
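    # Worked illustration for Test 5: failure[j] is the length of the longest
    # proper prefix of the pattern that is also a suffix ending at index j.
    #
    #     pattern: a a b a a b a a a
    #     failure: 0 1 0 1 2 3 4 5 2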
| 7 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class lowercase_ ( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''gpt_bigcode'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''n_embd''',
        '''max_position_embeddings''': '''n_positions''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__( self , vocab_size=50_257 , n_positions=1_024 , n_embd=768 , n_layer=12 , n_head=12 , n_inner=None , activation_function="gelu_pytorch_tanh" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , attention_softmax_in_fp32=True , scale_attention_softmax_in_fp32=True , multi_query=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
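# A minimal sketch of instantiating this config (assuming it is exposed as
# `transformers.GPTBigCodeConfig`, as in the library):
#
#     from transformers import GPTBigCodeConfig
#     config = GPTBigCodeConfig(n_layer=24, n_head=16, multi_query=True)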
| 7 | 1 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x ): # picklable for multiprocessing
    return x.sum()
def add_one(i ): # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x: int
    y: str
class __a (TestCase ):
def UpperCAmelCase__ ( self : Tuple ) -> int:
"""simple docstring"""
UpperCAmelCase_ : List[str] = {}
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : str = 1
UpperCAmelCase_ : int = [1, 2]
UpperCAmelCase_ : str = {'''a''': 1, '''b''': 2}
UpperCAmelCase_ : str = {'''a''': [1, 2], '''b''': [3, 4]}
UpperCAmelCase_ : str = {'''a''': {'''1''': 1}, '''b''': 2}
UpperCAmelCase_ : List[str] = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
UpperCAmelCase_ : Dict = {}
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : Optional[int] = 2
UpperCAmelCase_ : Optional[Any] = [2, 3]
UpperCAmelCase_ : List[str] = {'''a''': 2, '''b''': 3}
UpperCAmelCase_ : int = {'''a''': [2, 3], '''b''': [4, 5]}
UpperCAmelCase_ : List[str] = {'''a''': {'''1''': 2}, '''b''': 3}
UpperCAmelCase_ : Optional[int] = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ ) , __magic_name__ )
UpperCAmelCase_ : Dict = 2
self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
UpperCAmelCase_ : Optional[int] = {'''a''': np.eye(2 ), '''b''': np.zeros(3 ), '''c''': np.ones(2 )}
UpperCAmelCase_ : str = {'''a''': 2, '''b''': 0, '''c''': 2}
UpperCAmelCase_ : int = {
'''a''': np.eye(2 ).astype(__magic_name__ ),
'''b''': np.zeros(3 ).astype(__magic_name__ ),
'''c''': np.ones(2 ).astype(__magic_name__ ),
}
self.assertEqual(map_nested(__magic_name__ , __magic_name__ , map_numpy=__magic_name__ ) , __magic_name__ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(__magic_name__ , __magic_name__ , map_numpy=__magic_name__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(__magic_name__ , __magic_name__ , map_numpy=__magic_name__ , num_proc=__magic_name__ ) , __magic_name__ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(__magic_name__ , __magic_name__ , map_numpy=__magic_name__ , num_proc=__magic_name__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(__magic_name__ ): # can't pickle a local lambda
map_nested(lambda __magic_name__ : x + 1 , __magic_name__ , num_proc=__magic_name__ )
    def test_zip_dict( self ) -> int:
        d1 = {'''a''': 1, '''b''': 2}
        d2 = {'''a''': 3, '''b''': 4}
        d3 = {'''a''': 5, '''b''': 6}
        expected_zip_dict_result = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(d1 , d2 , d3 ) ) , expected_zip_dict_result )
    def test_temporary_assignment( self ) -> List[str]:
        class Foo:
            my_attr = '''bar'''
        foo = Foo()
        self.assertEqual(foo.my_attr , '''bar''' )
        with temporary_assignment(foo , '''my_attr''' , '''BAR''' ):
            self.assertEqual(foo.my_attr , '''BAR''' )
        self.assertEqual(foo.my_attr , '''bar''' )
@pytest.mark.parametrize(
'''iterable_length, num_proc, expected_num_proc''', [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
], )
def test_map_nested_num_proc( iterable_length, num_proc, expected_num_proc ) -> Optional[Any]:
    with patch('''datasets.utils.py_utils._single_map_nested''' ) as mock_single_map_nested, patch(
        '''datasets.parallel.parallel.Pool''' ) as mock_multiprocessing_pool:
        data_struct = {F"""{i}""": i for i in range(iterable_length )}
        _ = map_nested(lambda x : x + 10, data_struct, num_proc=num_proc, parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class __a (TestCase ):
    @require_tf
    def test_tensorflow( self ) -> List[str]:
        import tensorflow as tf
        from tensorflow.keras import layers
        model = layers.Dense(2 )
        def gen_random_output():
            x = tf.random.uniform((1, 3) )
            return model(x ).numpy()
        with temp_seed(42 , set_tensorflow=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_tensorflow=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
    @require_torch
    def test_torch( self ) -> str:
        import torch
        def gen_random_output():
            model = torch.nn.Linear(3 , 2 )
            x = torch.rand(1 , 3 )
            return model(x ).detach().numpy()
        with temp_seed(42 , set_pytorch=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_pytorch=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
    def test_numpy( self ) -> Optional[Any]:
        def gen_random_output():
            return np.random.rand(1 , 3 )
        with temp_seed(42 ):
            out1 = gen_random_output()
        with temp_seed(42 ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@pytest.mark.parametrize('''input_data''', [{}] )
def test_nested_data_structure_data( input_data ) -> str:
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
'''data, expected_output''', [
({}, []),
([], []),
('''foo''', ['''foo''']),
(['''foo''', '''bar'''], ['''foo''', '''bar''']),
([['''foo''', '''bar''']], ['''foo''', '''bar''']),
([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
({'''a''': 1, '''b''': 2}, [1, 2]),
({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
], )
def test_flatten( data, expected_output ) -> Optional[Any]:
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def test_asdict() -> Dict:
    input = A(x=1, y='''foobar''' )
    expected_output = {'''x''': 1, '''y''': '''foobar'''}
    assert asdict(input ) == expected_output
    input = {'''a''': {'''b''': A(x=10, y='''foo''' )}, '''c''': [A(x=20, y='''bar''' )]}
    expected_output = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]}
    assert asdict(input ) == expected_output
    with pytest.raises(TypeError ):
        asdict([1, A(x=10, y='''foo''' )] )
def _split_text(text: str ) -> Optional[int]:
    return text.split()
def _2seconds_generator_of_2items_with_timing(content ) -> Optional[int]:
    yield (time.time(), content)
    time.sleep(2 )
    yield (time.time(), content)
def test_iflatmap_unordered() -> Optional[int]:
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
        assert out.count('''hello''' ) == 10
        assert out.count('''there''' ) == 10
        assert len(out ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
        assert out.count('''hello''' ) == 10
        assert out.count('''there''' ) == 10
        assert len(out ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}] ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content )
        assert out.count('''a''' ) == 2
        assert out.count('''b''' ) == 2
        assert len(out ) == 4
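# A minimal illustration of `map_nested`, consistent with the unit tests above:
# the mapped function is applied to every leaf of a nested dict/list structure.
def _map_nested_demo() -> None:
    assert map_nested(lambda x: x + 1, {'''a''': {'''1''': 1}, '''b''': 2} ) == {'''a''': {'''1''': 2}, '''b''': 3}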
| 644 |
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : int = "▁"
snake_case_ : str = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
snake_case_ : int = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
snake_case_ : Optional[Any] = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
snake_case_ : Dict = {
"ernie-m-base": 5_14,
"ernie-m-large": 5_14,
}
snake_case_ : Any = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class __a (PreTrainedTokenizer ):
    model_input_names = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
    def __init__( self , sentencepiece_model_ckpt , vocab_file=None , do_lower_case=False , encoding="utf8" , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        """simple docstring"""
        # Mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , vocab_file=vocab_file , encoding=encoding , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(sentencepiece_model_ckpt )
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file )
        else:
            self.vocab = {self.sp_model.id_to_piece(id ): id for id in range(self.sp_model.get_piece_size() )}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Any ) -> Any:
"""simple docstring"""
if text is None:
return None
UpperCAmelCase_ : str = self.tokenize(__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ : str = '''''', []
for i, ch in enumerate(__magic_name__ ):
if ch in self.SP_CHAR_MAPPING:
UpperCAmelCase_ : Optional[int] = self.SP_CHAR_MAPPING.get(__magic_name__ )
else:
UpperCAmelCase_ : Union[str, Any] = unicodedata.normalize('''NFKC''' , __magic_name__ )
if self.is_whitespace(__magic_name__ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(__magic_name__ ) )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = normalized_text, [], 0
if self.do_lower_case:
UpperCAmelCase_ : Optional[int] = text.lower()
for token in split_tokens:
if token[:1] == "▁":
UpperCAmelCase_ : Tuple = token[1:]
UpperCAmelCase_ : int = text[offset:].index(__magic_name__ ) + offset
UpperCAmelCase_ : Optional[int] = start + len(__magic_name__ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
UpperCAmelCase_ : int = end
return token_mapping
@property
def UpperCAmelCase__ ( self : Any ) -> Any:
"""simple docstring"""
return len(self.vocab )
def UpperCAmelCase__ ( self : List[Any] ) -> int:
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : str ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.__dict__.copy()
UpperCAmelCase_ : Optional[Any] = None
return state
def __setstate__( self : str , __magic_name__ : Any ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Dict = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCAmelCase_ : int = {}
UpperCAmelCase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Any ) -> List[str]:
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(__magic_name__ , __magic_name__ ) for c in text) )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Tuple , __magic_name__ : Any=False , __magic_name__ : List[str]=64 , __magic_name__ : List[str]=0.1 ) -> List[str]:
"""simple docstring"""
if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
UpperCAmelCase_ : Dict = True
if self.sp_model_kwargs.get('''alpha''' ) is not None:
UpperCAmelCase_ : Union[str, Any] = self.sp_model_kwargs.get('''alpha''' )
if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
UpperCAmelCase_ : Any = self.sp_model_kwargs.get('''nbest_size''' )
if not enable_sampling:
UpperCAmelCase_ : Dict = self.sp_model.EncodeAsPieces(__magic_name__ )
else:
UpperCAmelCase_ : Dict = self.sp_model.SampleEncodeAsPieces(__magic_name__ , __magic_name__ , __magic_name__ )
UpperCAmelCase_ : List[Any] = []
for pi, piece in enumerate(__magic_name__ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(__magic_name__ ) and pi != 0:
new_pieces.append(__magic_name__ )
continue
else:
continue
UpperCAmelCase_ : List[str] = 0
for i, chunk in enumerate(__magic_name__ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(__magic_name__ ) or self.is_punct(__magic_name__ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(__magic_name__ )
UpperCAmelCase_ : List[Any] = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCAmelCase_ : List[str] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCAmelCase_ : str = i
if len(__magic_name__ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Optional[int] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = ''''''.join(__magic_name__ ).replace(__magic_name__ , ''' ''' ).strip()
return out_string
def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : str = self.convert_ids_to_tokens(__magic_name__ )
UpperCAmelCase_ : Optional[Any] = ''''''.join(__magic_name__ ).replace(__magic_name__ , ''' ''' ).strip()
return out_string
def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return self.vocab.get(__magic_name__ , self.vocab.get(self.unk_token ) )
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.reverse_vocab.get(__magic_name__ , self.unk_token )
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Any , __magic_name__ : Union[str, Any]=None ) -> Any:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ : Union[str, Any] = [self.cls_token_id]
UpperCAmelCase_ : List[Any] = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : List[str]=None ) -> int:
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[str]=None , __magic_name__ : Optional[Any]=False ) -> Optional[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__magic_name__ )) + [1, 1] + ([0] * len(__magic_name__ )) + [1]
return [1] + ([0] * len(__magic_name__ )) + [1]
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(__magic_name__ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(__magic_name__ ) + 1) + [1] * (len(__magic_name__ ) + 3)
def UpperCAmelCase__ ( self : Dict , __magic_name__ : str ) -> Tuple:
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[int] ) -> str:
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[Any] ) -> Dict:
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Any ) -> Union[str, Any]:
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(__magic_name__ ) == 1:
UpperCAmelCase_ : Optional[Any] = unicodedata.category(__magic_name__ )
if cat == "Zs":
return True
return False
    def load_vocab( self , filepath ) -> Any:
        """simple docstring"""
        token_to_idx = {}
        with io.open(filepath , '''r''' , encoding='''utf-8''' ) as f:
            for index, line in enumerate(f ):
                token = line.rstrip('''\n''' )
                token_to_idx[token] = int(index )
        return token_to_idx
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        else:
            vocab_file = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ''' Please check that the vocabulary is not corrupted!''' )
                    index = token_index
                writer.write(token + '''\n''' )
                index += 1
        tokenizer_model_file = os.path.join(save_directory , '''sentencepiece.bpe.model''' )
        with open(tokenizer_model_file , '''wb''' ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )
        return (vocab_file,)
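# For reference, the sequence layouts implemented by the special-token methods
# above:
#
#     single sequence: [CLS] A [SEP]
#     pair of sequences: [CLS] A [SEP] [SEP] B [SEP]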
| 644 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class __lowerCAmelCase ( PretrainedConfig ):
    model_type = """deta"""
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }
    def __init__( self , backbone_config=None , num_queries=900 , max_position_embeddings=2_048 , encoder_layers=6 , encoder_ffn_dim=2_048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=1_024 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , num_feature_levels=5 , encoder_n_points=4 , decoder_n_points=4 , two_stage=True , two_stage_num_proposals=300 , with_box_refine=True , assign_first_stage=True , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.25 , **kwargs , ) -> Tuple:
        '''simple docstring'''
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'] )
        else:
            if isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.pop('model_type' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.' )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ) -> int:
        '''simple docstring'''
        return self.encoder_attention_heads
    @property
    def hidden_size( self ) -> int:
        '''simple docstring'''
        return self.d_model
    def to_dict( self ) -> dict:
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
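# A minimal sketch of constructing this config (assuming it is exposed as
# `transformers.DetaConfig`, as in the library):
#
#     from transformers import DetaConfig
#     config = DetaConfig(two_stage=True, with_box_refine=True, num_queries=900)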
| 291 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowercase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
lowercase_ = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class __lowerCAmelCase ( unittest.TestCase ):
    def setUp( self ) -> Tuple:
        '''simple docstring'''
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir , 'models/bert/' ) )
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path , 'src/transformers/models/bert/modeling_bert.py' ) , os.path.join(self.transformer_dir , 'models/bert/modeling_bert.py' ) , )
    def tearDown( self ) -> Optional[int]:
        '''simple docstring'''
        check_copies.TRANSFORMER_PATH = 'src/transformers'
        shutil.rmtree(self.transformer_dir )
    def check_copy_consistency( self , comment , class_name , class_code , overwrite_result=None ) -> str:
        '''simple docstring'''
        code = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
        if overwrite_result is not None:
            expected = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35} , line_length=119 )
        code = black.format_str(code , mode=mode )
        fname = os.path.join(self.transformer_dir , 'new_code.py' )
        with open(fname , 'w' , newline='\n' ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True )
            with open(fname , 'r' ) as f:
                self.assertTrue(f.read() , expected )
    def test_find_code_in_transformers( self ) -> Dict:
        '''simple docstring'''
        code = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead' )
        self.assertEqual(code , REFERENCE_CODE )
    def test_is_copy_consistent( self ) -> str:
        '''simple docstring'''
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , REFERENCE_CODE + '\n' , )
        # With no empty line at the end
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , REFERENCE_CODE , )
        # Copy consistency with rename
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , re.sub('Bert' , 'TestModel' , REFERENCE_CODE ) , )
        # Copy consistency with a really long name
        long_class_name = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(
            F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , F'''{long_class_name}LMPredictionHead''' , re.sub('Bert' , long_class_name , REFERENCE_CODE ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , REFERENCE_CODE , overwrite_result=re.sub('Bert' , 'TestModel' , REFERENCE_CODE ) , )
    def test_convert_to_localized_md( self ) -> List[str]:
'''simple docstring'''
        localized_readme = check_copies.LOCALIZED_READMES['README_zh-hans.md']
        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            md_list , localized_md_list , localized_readme['format_model_list'] )
        self.assertFalse(num_models_equal )
        self.assertEqual(converted_md_list , converted_md_list_sample )
        num_models_equal , converted_md_list = check_copies.convert_to_localized_md(
            md_list , converted_md_list , localized_readme['format_model_list'] )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal )
        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
        link_unchanged_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
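        # Sketch of the contract exercised above (argument names assumed from the
        # fixtures): convert_to_localized_md(english_list, localized_list, format_str)
        # returns a (num_models_equal, converted_md_list) pair — a flag saying whether
        # the localized README already lists every English model, plus the regenerated
        # localized list with model links left pointing at the English docs.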
| 291 | 1 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
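
# Usage sketch (the image variable is hypothetical): the shim keeps old imports and
# checkpoints working while steering users to the new class — constructing it warns
# once, then behaves exactly like DPTImageProcessor.
#
#   extractor = DPTFeatureExtractor()                       # emits the deprecation warning
#   inputs = extractor(images=image, return_tensors="pt")   # same call API as DPTImageProcessor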
| 204 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_400,
        n_positions=2_048,
        n_embd=4_096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
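
# Small sketch of how attribute_map plays out (assumes `transformers` is installed):
# generic code can read the canonical names while the values stay stored under the
# GPT-J-specific ones.
#
#   config = GPTJConfig()
#   assert config.hidden_size == config.n_embd == 4096
#   assert config.num_hidden_layers == config.n_layer == 28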
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs
    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
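
# Hypothetical driver sketch for the ONNX config above (the tokenizer checkpoint and
# PYTORCH tensor type are assumptions; any GPT-J tokenizer would do):
#
#   from transformers import AutoTokenizer
#   onnx_config = GPTJOnnxConfig(GPTJConfig(), use_past=True)
#   tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#   # dummy now holds input_ids, past_key_values, and a mask padded to seqlen + 2.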
| 204 | 1 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
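
# The three branches above mirror FSDP's StateDictType contract: FULL gathers the
# whole state dict on rank 0, LOCAL writes one flat file per rank, and SHARDED hands
# the on-disk layout to torch.distributed.checkpoint. The loaders below undo each
# layout symmetrically.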
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
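
# Hedged end-to-end sketch (the plugin/accelerator/model/optimizer objects are
# assumed to come from an FSDP-enabled `accelerate` setup):
#
#   fsdp_plugin = accelerator.state.fsdp_plugin
#   save_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#   save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")
#   ...
#   load_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#   load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")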
| 623 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_unispeech'] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
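
# Effect sketch: with the lazy module installed in sys.modules, importing this
# package is cheap; the heavyweight torch-backed module is only imported when an
# attribute such as `UniSpeechModel` is first accessed (and only if torch is
# available, per the guard above).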
| 623 | 1 |
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """
    Greedy APX-Algorithm for minimum vertex cover.

    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue: list[list] = []

    # for each node and its adjacency list, add them and the rank of the node to the queue
    # using the heapq module the queue will be filled like a priority queue
    # heapq works with a min priority queue, so -1 * len(v) is used to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent nodes, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1

        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
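
# Worked first step for the demo graph below: nodes 2 and 3 both enter the heap with
# rank -3; heapq breaks the tie on the (key, value) tuple, so node 2 is popped first,
# its neighbours' ranks are incremented, and the loop repeats until no edges remain,
# yielding the cover {0, 1, 2, 4}.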
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 77 |
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, 'index.json')
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"""{key}.dat""")
                self.assertTrue(os.path.isfile(weight_file))
                # TODO: add tests on the fact weights are properly loaded
    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, 'weight', tmp_dir, {})
                weight_file = os.path.join(tmp_dir, 'weight.dat')
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {'weight': {'shape': [2, 3], 'dtype': str(dtype).split('.')[1]}})

                new_weight = load_offloaded_weight(weight_file, index['weight'])
                self.assertTrue(torch.equal(weight, new_weight))
    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if 'linear2' not in k}
        disk_part = {k: v for k, v in state_dict.items() if 'linear2' in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if 'weight' in k}
        disk_part = {k: v for k, v in state_dict.items() if 'weight' not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
    def test_extract_submodules_state_dict(self):
        state_dict = {'a.1': 0, 'a.10': 1, 'a.2': 2}
        extracted = extract_submodules_state_dict(state_dict, ['a.1', 'a.2'])
        self.assertDictEqual(extracted, {'a.1': 0, 'a.2': 2})

        state_dict = {'a.1.a': 0, 'a.10.a': 1, 'a.2.a': 2}
        extracted = extract_submodules_state_dict(state_dict, ['a.1', 'a.2'])
        self.assertDictEqual(extracted, {'a.1.a': 0, 'a.2.a': 2})
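
# Worked expectation (mirrors test_offload_weight above): for a (2, 3) float16
# tensor, offload_weight writes the raw bytes to `weight.dat` and records
# {"weight": {"shape": [2, 3], "dtype": "float16"}} in the index, since
# str(torch.float16).split(".")[1] == "float16"; load_offloaded_weight then
# memory-maps the file back with exactly that shape and dtype.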
| 77 | 1 |