| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (86 to 54.5k chars) | int64 (0 to 371) | string (87 to 49.2k chars) | int64 (0 to 349) | int64 (0 to 1) |
import os
import socket
from contextlib import contextmanager

import torch

from ..commands.config.default import write_basic_config  # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version


if is_deepspeed_available():
    from deepspeed import DeepSpeedEngine

if is_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


def is_compiled_module(module) -> bool:
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers (DDP, DataParallel, DeepSpeed, torch.compile)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model


def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached it."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk; uses `xm.save` on TPU, `torch.save` on the local main process otherwise."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Context manager that temporarily sets (upper-cased) environment variables and removes them on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Return a human-readable name for a class, function, or instance."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination` and return `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    """Check whether a local port is already in use (defaults to 29500)."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
| code_codestyle: 71 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| style_context_codestyle: 71 | label: 1 |
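A minimal usage sketch for the `patch_environment` context manager from the first sample in this row: keys are upper-cased and values stringified on entry, and the variables are removed again on exit.

```python
import os

# Assumes patch_environment from the sample above is in scope.
with patch_environment(master_port=29501, cuda_visible_devices="0,1"):
    assert os.environ["MASTER_PORT"] == "29501"
    assert os.environ["CUDA_VISIBLE_DEVICES"] == "0,1"

assert "MASTER_PORT" not in os.environ  # cleaned up after the block
```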
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
A__ : List[str] = logging.get_logger(__name__)
A__ : Dict = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class lowercase__ ( snake_case__ ):
_UpperCAmelCase :Optional[Any] = "gpt_neo"
_UpperCAmelCase :Dict = ["past_key_values"]
_UpperCAmelCase :Union[str, Any] = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self : Tuple , snake_case__ : Optional[Any]=5_0257 , snake_case__ : Optional[int]=2048 , snake_case__ : Dict=2048 , snake_case__ : int=24 , snake_case__ : str=[[["global", "local"], 12]] , snake_case__ : Union[str, Any]=16 , snake_case__ : Union[str, Any]=None , snake_case__ : Optional[int]=256 , snake_case__ : int="gelu_new" , snake_case__ : Optional[int]=0.0 , snake_case__ : List[Any]=0.0 , snake_case__ : List[str]=0.0 , snake_case__ : Union[str, Any]=0.1 , snake_case__ : Any=1E-5 , snake_case__ : List[Any]=0.02 , snake_case__ : List[str]=True , snake_case__ : Dict=5_0256 , snake_case__ : List[str]=5_0256 , **snake_case__ : List[Any] , ):
lowerCamelCase_ : Tuple =vocab_size
lowerCamelCase_ : Any =max_position_embeddings
lowerCamelCase_ : int =hidden_size
lowerCamelCase_ : int =num_layers
lowerCamelCase_ : Union[str, Any] =num_heads
lowerCamelCase_ : List[str] =intermediate_size
lowerCamelCase_ : int =window_size
lowerCamelCase_ : Any =activation_function
lowerCamelCase_ : Dict =resid_dropout
lowerCamelCase_ : Optional[Any] =embed_dropout
lowerCamelCase_ : Tuple =attention_dropout
lowerCamelCase_ : Tuple =classifier_dropout
lowerCamelCase_ : List[Any] =layer_norm_epsilon
lowerCamelCase_ : List[str] =initializer_range
lowerCamelCase_ : Union[str, Any] =use_cache
lowerCamelCase_ : Union[str, Any] =bos_token_id
lowerCamelCase_ : Optional[int] =eos_token_id
lowerCamelCase_ : Tuple =attention_types
lowerCamelCase_ : Dict =self.expand_attention_types_params(snake_case__ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.attention_layers)` == `config.num_layers` "
F"""but is `len(config.attention_layers) = {len(self.attention_layers )}`, """
F"""`config.num_layers = {self.num_layers}`. """
"`config.attention_layers` is prepared using `config.attention_types`. "
"Please verify the value of `config.attention_types` argument." )
super().__init__(bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
@staticmethod
def UpperCAmelCase__ ( snake_case__ : str ):
lowerCamelCase_ : Any =[]
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def _snake_case ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Any ) -> Optional[Any]:
import torch
lowerCamelCase_ : Union[str, Any] =input.size()
lowerCamelCase_ : Any =len(lowerCamelCase__ )
lowerCamelCase_ : Optional[int] =shape[dimension]
lowerCamelCase_ : Optional[Any] =torch.arange(0 , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ : List[str] =torch.div(sizedim - size , lowerCamelCase__ , rounding_mode="floor" ) + 1
lowerCamelCase_ : List[Any] =torch.arange(lowerCamelCase__ ) + low_indices[:min_length][:, None]
lowerCamelCase_ : Any =[slice(lowerCamelCase__ )] * rank
lowerCamelCase_ : Tuple =indices
lowerCamelCase_ : List[Any] =input[s]
lowerCamelCase_ : Dict =list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(lowerCamelCase__ )
def _snake_case ( lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int ) -> Tuple:
import torch
lowerCamelCase_ : str =torch.arange(1 , lowerCamelCase__ )
lowerCamelCase_ : List[str] =torch.remainder(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ : Dict =remainders == 0
lowerCamelCase_ : List[Any] =candidates[divisor_indices]
lowerCamelCase_ : Optional[Any] =torch.max(lowerCamelCase__ )
return largest_divisor, torch.div(lowerCamelCase__ , lowerCamelCase__ , rounding_mode="floor" )
class lowercase__ ( snake_case__ ):
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
lowerCamelCase_ : Tuple =OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(snake_case__ , direction="inputs" )
lowerCamelCase_ : List[Any] ={0: "batch", 1: "past_sequence + sequence"}
else:
lowerCamelCase_ : Tuple ={0: "batch", 1: "sequence"}
return common_inputs
@property
def UpperCAmelCase__ ( self : List[Any] ):
return self._config.num_heads
def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : PreTrainedTokenizer , snake_case__ : int = -1 , snake_case__ : int = -1 , snake_case__ : bool = False , snake_case__ : Optional[TensorType] = None , ):
lowerCamelCase_ : Optional[int] =super(snake_case__ , self ).generate_dummy_inputs(
snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ )
# We need to order the input in the way they appears in the forward()
lowerCamelCase_ : Optional[int] =OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
lowerCamelCase_ , lowerCamelCase_ : List[Any] =common_inputs["input_ids"].shape
# Not using the same length for past_key_values
lowerCamelCase_ : List[str] =seqlen + 2
lowerCamelCase_ : List[str] =(
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCamelCase_ : Optional[int] =[
(torch.zeros(snake_case__ ), torch.zeros(snake_case__ )) for _ in range(self.num_layers )
]
lowerCamelCase_ : Optional[Any] =common_inputs["attention_mask"]
if self.use_past:
lowerCamelCase_ : Optional[int] =ordered_inputs["attention_mask"].dtype
lowerCamelCase_ : Tuple =torch.cat(
[ordered_inputs["attention_mask"], torch.ones(snake_case__ , snake_case__ , dtype=snake_case__ )] , dim=1 )
return ordered_inputs
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return 13
| code_codestyle: 209 |
"""simple docstring"""
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
A__ : Dict = logging.get_logger(__name__)
A__ : Dict = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
A__ : List[Any] = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
A__ : Optional[int] = {
'facebook/blenderbot_small-90M': 512,
}
class lowercase__ ( snake_case__ ):
_UpperCAmelCase :Optional[int] = VOCAB_FILES_NAMES
_UpperCAmelCase :Tuple = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase :Tuple = BlenderbotSmallTokenizer
def __init__( self : Tuple , snake_case__ : Optional[Any]=None , snake_case__ : str=None , snake_case__ : Any="<|endoftext|>" , snake_case__ : Tuple="<|endoftext|>" , snake_case__ : Tuple="<|endoftext|>" , snake_case__ : str=False , snake_case__ : int=True , **snake_case__ : Tuple , ):
super().__init__(
ByteLevelBPETokenizer(
vocab=snake_case__ , merges=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , ) , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , **snake_case__ , )
lowerCamelCase_ : Optional[int] =add_prefix_space
def UpperCAmelCase__ ( self : Tuple , snake_case__ : Optional[Any] , snake_case__ : List[str]=None ):
lowerCamelCase_ : Optional[Any] =[self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCAmelCase__ ( self : Tuple , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
lowerCamelCase_ : int =[self.sep_token_id]
lowerCamelCase_ : List[Any] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| style_context_codestyle: 209 | label: 1 |
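To make the `attention_types` expansion in the GPT-Neo config above concrete, here is the same logic as a standalone snippet; with the default spec `[[["global", "local"], 12]]` it produces one entry per layer, alternating global and local attention.

```python
# Standalone copy of GPTNeoConfig.expand_attention_types_params:
# each [pattern, count] pair repeats its pattern `count` times.
def expand_attention_types_params(attention_types):
    attentions = []
    for item in attention_types:
        for _ in range(item[1]):
            attentions.extend(item[0])
    return attentions


layers = expand_attention_types_params([[["global", "local"], 12]])
assert len(layers) == 24  # matches the default num_layers=24
assert layers[:4] == ["global", "local", "global", "local"]
```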
"""Pearson correlation coefficient metric."""
from scipy.stats import pearsonr

import datasets


_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""


_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted class labels, as returned by a model.
    references (`list` of `int`): Ground truth labels.
    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

Returns:
    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.

Examples:

    Example 1-A simple example using only predictions and references.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
        >>> print(round(results['pearsonr'], 2))
        -0.74

    Example 2-The same as Example 1, but that also returns the `p-value`.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
        >>> print(sorted(list(results.keys())))
        ['p-value', 'pearsonr']
        >>> print(round(results['pearsonr'], 2))
        -0.74
        >>> print(round(results['p-value'], 2))
        0.15
"""


_CITATION = """
@article{2020SciPy-NMeth,
author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
           Haberland, Matt and Reddy, Tyler and Cournapeau, David and
           Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
           Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
           Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
           Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
           Kern, Robert and Larson, Eric and Carey, C J and
           Polat, Ilhan and Feng, Yu and Moore, Eric W. and
           {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
           Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
           Harris, Charles R. and Archibald, Anne M. and
           Ribeiro, Antonio H. and Pedregosa, Fabian and
           {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
           Computing in Python}},
journal = {Nature Methods},
year    = {2020},
volume  = {17},
pages   = {261--272},
adsurl  = {https://rdcu.be/b08Wh},
doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| code_codestyle: 41 |
import inspect
import unittest

from transformers import MobileNetV1Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileNetV1ForImageClassification, MobileNetV1Model
    from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV1ImageProcessor


class MobileNetV1ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetV1ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV1Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV1Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV1ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV1ModelTester(self)
        self.config_tester = MobileNetV1ConfigTester(self, config_class=MobileNetV1Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV1Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetV1ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| style_context_codestyle: 41 | label: 1 |
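For reference, the statistic computed by the `pearsonr` metric above is the standard Pearson correlation coefficient of two samples $x$ and $y$:

```latex
r = \frac{\sum_{i=1}^{n} (x_i - \bar{x})(y_i - \bar{y})}
         {\sqrt{\sum_{i=1}^{n} (x_i - \bar{x})^2} \, \sqrt{\sum_{i=1}^{n} (y_i - \bar{y})^2}}
```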
def dodecahedron_surface_area(edge: float) -> float:
    """Surface area of a regular dodecahedron: 3 * sqrt(25 + 10*sqrt(5)) * edge^2."""
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Volume of a regular dodecahedron: ((15 + 7*sqrt(5)) / 4) * edge^3."""
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| code_codestyle: 92 |
"""Tokenization classes for the BARThez model."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| style_context_codestyle: 92 | label: 1 |
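The dodecahedron helpers in this row implement the standard closed forms for a regular dodecahedron with edge length $a$:

```latex
A = 3\sqrt{25 + 10\sqrt{5}}\, a^{2}, \qquad V = \frac{15 + 7\sqrt{5}}{4}\, a^{3}
```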
import random
import unittest

import torch

from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| code_codestyle: 97 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ : str = {
'''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
'''tokenization_roformer''': ['''RoFormerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = ['''RoFormerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = [
'''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoFormerForCausalLM''',
'''RoFormerForMaskedLM''',
'''RoFormerForMultipleChoice''',
'''RoFormerForQuestionAnswering''',
'''RoFormerForSequenceClassification''',
'''RoFormerForTokenClassification''',
'''RoFormerLayer''',
'''RoFormerModel''',
'''RoFormerPreTrainedModel''',
'''load_tf_weights_in_roformer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[Any] = [
'''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRoFormerForCausalLM''',
'''TFRoFormerForMaskedLM''',
'''TFRoFormerForMultipleChoice''',
'''TFRoFormerForQuestionAnswering''',
'''TFRoFormerForSequenceClassification''',
'''TFRoFormerForTokenClassification''',
'''TFRoFormerLayer''',
'''TFRoFormerModel''',
'''TFRoFormerPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Tuple = [
'''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxRoFormerForMaskedLM''',
'''FlaxRoFormerForMultipleChoice''',
'''FlaxRoFormerForQuestionAnswering''',
'''FlaxRoFormerForSequenceClassification''',
'''FlaxRoFormerForTokenClassification''',
'''FlaxRoFormerModel''',
'''FlaxRoFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
a__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| style_context_codestyle: 54 | label: 0 |
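The RoFormer `__init__.py` above uses transformers' lazy-import pattern: a plain dict maps submodule names to exported symbols, and at runtime the module is replaced by a proxy that imports a submodule only when one of its attributes is first accessed. A self-contained sketch of the idea follows; the `LazyModule` class here is illustrative, not the actual `_LazyModule` implementation.

```python
import importlib
import types


class LazyModule(types.ModuleType):
    """Illustrative stand-in for transformers' _LazyModule: defers submodule imports."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # Import the owning submodule on first access and fetch the symbol.
        submodule = importlib.import_module(f".{self._attr_to_module[attr]}", self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value
```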
from __future__ import annotations

import unittest

from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel


@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]


def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| code_codestyle: 362 |
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Check whether it is safe to place a queen at board[row][column]."""
    # same row and same column
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # upper-left and upper-right diagonals
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row, backtracking on conflicts; records and prints each full placement."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| style_context_codestyle: 134 | label: 0 |
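A quick sanity check for the backtracking solver in this row (hypothetical driver code, reusing the module-level `solution`, `is_safe`, and `solve` defined above): the 4-queens puzzle has exactly 2 solutions, and 8-queens has 92.

```python
# Hypothetical driver for the solver above; solve() also prints each placement.
solution.clear()
n = 4
board = [[0 for _ in range(n)] for _ in range(n)]
solve(board, 0)
assert len(solution) == 2  # the classic 4-queens count
```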
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Sum of array[start..end] (inclusive) in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if some contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| code_codestyle: 162 |
from ...processing_utils import ProcessorMixin


class TvltProcessor(ProcessorMixin):
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
| style_context_codestyle: 118 | label: 0 |
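A short usage sketch for the `PrefixSum` class in this row: after the O(n) precomputation, any range sum is O(1), and `contains_sum` answers whether some contiguous subarray sums to the target.

```python
ps = PrefixSum([1, 2, 3, 4])
assert ps.get_sum(0, 3) == 10    # whole array
assert ps.get_sum(1, 2) == 5     # 2 + 3
assert ps.contains_sum(7)        # subarray [3, 4]
assert not ps.contains_sum(100)  # no such subarray
```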
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    """Return True if there is a path from source `s` to sink `t` in the residual graph."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def mincut(graph, source, sink):
    """Compute the saturated edges of a minimum cut using the Ford-Fulkerson method with BFS."""
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
| code_codestyle: 366 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
UpperCamelCase__ = None
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = '▁'
UpperCamelCase__ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCamelCase__ = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
UpperCamelCase__ = {
'google/pegasus-xsum': 5_1_2,
}
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : str = VOCAB_FILES_NAMES
__UpperCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Union[str, Any] = PegasusTokenizer
__UpperCAmelCase : Any = ['input_ids', 'attention_mask']
def __init__(self : Optional[int] , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : Any=None , __UpperCAmelCase : Union[str, Any]="<pad>" , __UpperCAmelCase : List[str]="</s>" , __UpperCAmelCase : Union[str, Any]="<unk>" , __UpperCAmelCase : int="<mask_2>" , __UpperCAmelCase : Optional[Any]="<mask_1>" , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : str=1_0_3 , **__UpperCAmelCase : str , ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = offset
if additional_special_tokens is not None:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError(
f"""additional_special_tokens should be of type {type(__UpperCAmelCase )}, but is"""
f""" {type(__UpperCAmelCase )}""" )
UpperCAmelCase__ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"""<unk_{i}>""" for i in range(len(__UpperCAmelCase ) , self.offset - 1 )
]
if len(set(__UpperCAmelCase ) ) != len(__UpperCAmelCase ):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
UpperCAmelCase__ = additional_special_tokens_extended
else:
UpperCAmelCase__ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )]
super().__init__(
__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , pad_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , mask_token_sent=__UpperCAmelCase , offset=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , **__UpperCAmelCase , )
UpperCAmelCase__ = vocab_file
UpperCAmelCase__ = False if not self.vocab_file else True
def lowercase_ (self : List[Any] , __UpperCAmelCase : Tuple ) -> int:
"""simple docstring"""
UpperCAmelCase__ = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"There should be 3 special tokens: mask_token, pad_token, and eos_token +"
f""" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}""" )
return [1 if x in all_special_ids else 0 for x in seq]
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : List , __UpperCAmelCase : Optional[List] = None , __UpperCAmelCase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(__UpperCAmelCase )
elif token_ids_a is None:
return self._special_token_mask(__UpperCAmelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowercase_ (self : str , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any]=None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase_ (self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase__ = os.path.join(
__UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ):
copyfile(self.vocab_file , __UpperCAmelCase )
return (out_vocab_file,)
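# Minimal runnable sketch of the special-tokens mask logic above, using
# hypothetical ids (pad=0, eos=1, mask_sent=2, mask=3) so it stays
# independent of any tokenizer class; the trailing 1 stands for the EOS
# token appended when building model inputs.
_sketch_special_ids = {0, 1, 2, 3}
def _sketch_special_tokens_mask(seq):
    # 1 marks a special token, 0 a normal one, plus a final 1 for EOS
    return [1 if tok in _sketch_special_ids else 0 for tok in seq] + [1]
assert _sketch_special_tokens_mask([5, 0, 7]) == [0, 1, 0, 1]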
| 143 | 0 |
# Imports
import numpy as np
class __A :
'''simple docstring'''
def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None ):
'''simple docstring'''
        self.set_matrices(red=__lowerCAmelCase , green=__lowerCAmelCase , blue=__lowerCAmelCase , red_edge=__lowerCAmelCase , nir=__lowerCAmelCase )
def __lowerCamelCase ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None ):
'''simple docstring'''
if red is not None:
lowerCamelCase__ = red
if green is not None:
lowerCamelCase__ = green
if blue is not None:
lowerCamelCase__ = blue
if red_edge is not None:
lowerCamelCase__ = red_edge
if nir is not None:
lowerCamelCase__ = nir
return True
def __lowerCamelCase ( self , __lowerCAmelCase="" , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None ):
'''simple docstring'''
        self.set_matrices(red=__lowerCAmelCase , green=__lowerCAmelCase , blue=__lowerCAmelCase , red_edge=__lowerCAmelCase , nir=__lowerCAmelCase )
lowerCamelCase__ = {
'''ARVI2''': self.arvaa,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('''Index not in the list!''' )
return False
def __lowerCamelCase ( self ):
'''simple docstring'''
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def __lowerCamelCase ( self ):
'''simple docstring'''
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.nir * (self.red / (self.green**2))
def __lowerCamelCase ( self ):
'''simple docstring'''
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def __lowerCamelCase ( self ):
'''simple docstring'''
return (self.nir - self.red) / (self.nir + self.red)
def __lowerCamelCase ( self ):
'''simple docstring'''
return (self.nir - self.blue) / (self.nir + self.blue)
def __lowerCamelCase ( self ):
'''simple docstring'''
return (self.redEdge - self.red) / (self.redEdge + self.red)
def __lowerCamelCase ( self ):
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green)
def __lowerCamelCase ( self ):
'''simple docstring'''
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def __lowerCamelCase ( self ):
'''simple docstring'''
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def __lowerCamelCase ( self ):
'''simple docstring'''
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def __lowerCamelCase ( self ):
'''simple docstring'''
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def __lowerCamelCase ( self , __lowerCAmelCase=0.08 , __lowerCAmelCase=1.22 , __lowerCAmelCase=0.03 ):
'''simple docstring'''
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def __lowerCamelCase ( self ):
'''simple docstring'''
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def __lowerCamelCase ( self ):
'''simple docstring'''
return (self.nir / self.green) - 1
def __lowerCamelCase ( self ):
'''simple docstring'''
return (self.nir / self.redEdge) - 1
def __lowerCamelCase ( self ):
'''simple docstring'''
return (self.red - self.blue) / self.red
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.nir - self.green
def __lowerCamelCase ( self ):
'''simple docstring'''
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def __lowerCamelCase ( self , __lowerCAmelCase=0.16 ):
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green + y)
def __lowerCamelCase ( self , __lowerCAmelCase=0.5 ):
'''simple docstring'''
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def __lowerCamelCase ( self ):
'''simple docstring'''
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def __lowerCamelCase ( self , __lowerCAmelCase=None , __lowerCAmelCase=None ):
'''simple docstring'''
return (self.nir - b) / (a * self.red)
def __lowerCamelCase ( self ):
'''simple docstring'''
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def __lowerCamelCase ( self ):
'''simple docstring'''
return (self.red + self.green + self.blue) / 30.5
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.nir / self.red
def __lowerCamelCase ( self ):
'''simple docstring'''
return (self.rvi() - 1) / (self.rvi() + 1)
def __lowerCamelCase ( self ):
'''simple docstring'''
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.green / (self.nir + self.red + self.green)
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.nir / (self.nir + self.red + self.green)
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.red / (self.nir + self.red + self.green)
def __lowerCamelCase ( self ):
'''simple docstring'''
return (self.green - self.red) / (self.green + self.red)
def __lowerCamelCase ( self ):
'''simple docstring'''
return (self.red - self.green) / (self.red + self.green)
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
lowerCamelCase__ = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def __lowerCamelCase ( self ):
'''simple docstring'''
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.nir / self.red
def __lowerCamelCase ( self ):
'''simple docstring'''
return (self.ndvi() + 0.5) ** (1 / 2)
def __lowerCamelCase ( self ):
'''simple docstring'''
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
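# Minimal sketch of one index from the class above: NDVI = (NIR - red) /
# (NIR + red), evaluated on hypothetical 2x2 reflectance bands.
_nir = np.array([[0.8, 0.7], [0.6, 0.5]])
_red = np.array([[0.2, 0.3], [0.3, 0.4]])
_ndvi = (_nir - _red) / (_nir + _red)
assert np.isclose(_ndvi[0, 0], 0.6)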
| 209 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = """camembert"""
def __init__( self , __lowerCAmelCase=3_0_5_2_2 , __lowerCAmelCase=7_6_8 , __lowerCAmelCase=1_2 , __lowerCAmelCase=1_2 , __lowerCAmelCase=3_0_7_2 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=1 , __lowerCAmelCase=0 , __lowerCAmelCase=2 , __lowerCAmelCase="absolute" , __lowerCAmelCase=True , __lowerCAmelCase=None , **__lowerCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = hidden_act
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = position_embedding_type
lowerCamelCase__ = use_cache
lowerCamelCase__ = classifier_dropout
class __A ( lowerCAmelCase ):
'''simple docstring'''
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase__ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase__ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 209 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class a_ ( unittest.TestCase ):
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = jnp.ones((batch_size, length) ) / length
return scores
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = None
UpperCamelCase = 20
UpperCamelCase = self._get_uniform_logits(batch_size=2 , length=_SCREAMING_SNAKE_CASE )
# tweak scores to not be uniform anymore
UpperCamelCase = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
UpperCamelCase = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
UpperCamelCase = jax.nn.softmax(_SCREAMING_SNAKE_CASE , axis=-1 )
UpperCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCamelCase = FlaxTemperatureLogitsWarper(temperature=1.3 )
UpperCamelCase = jax.nn.softmax(temp_dist_warper_sharper(_SCREAMING_SNAKE_CASE , scores.copy() , cur_len=_SCREAMING_SNAKE_CASE ) , axis=-1 )
UpperCamelCase = jax.nn.softmax(temp_dist_warper_smoother(_SCREAMING_SNAKE_CASE , scores.copy() , cur_len=_SCREAMING_SNAKE_CASE ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase = None
UpperCamelCase = 10
UpperCamelCase = 2
# create ramp distribution
UpperCamelCase = np.broadcast_to(np.arange(_SCREAMING_SNAKE_CASE )[None, :] , (batch_size, vocab_size) ).copy()
UpperCamelCase = ramp_logits[1:, : vocab_size // 2] + vocab_size
UpperCamelCase = FlaxTopKLogitsWarper(3 )
UpperCamelCase = top_k_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
UpperCamelCase = 5
UpperCamelCase = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
UpperCamelCase = np.broadcast_to(np.arange(_SCREAMING_SNAKE_CASE )[None, :] , (batch_size, length) ).copy()
UpperCamelCase = top_k_warp_safety_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = None
UpperCamelCase = 10
UpperCamelCase = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
UpperCamelCase = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
UpperCamelCase = FlaxTopPLogitsWarper(0.8 )
UpperCamelCase = np.exp(top_p_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
UpperCamelCase = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# check edge cases with negative and extreme logits
UpperCamelCase = np.broadcast_to(np.arange(_SCREAMING_SNAKE_CASE )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
UpperCamelCase = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
UpperCamelCase = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
UpperCamelCase = top_p_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = 20
UpperCamelCase = 4
UpperCamelCase = 0
UpperCamelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_SCREAMING_SNAKE_CASE )
# check that min length is applied at length 5
UpperCamelCase = ids_tensor((batch_size, 20) , vocab_size=20 )
UpperCamelCase = 5
UpperCamelCase = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = min_dist_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
UpperCamelCase = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = 15
UpperCamelCase = min_dist_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertFalse(jnp.isinf(_SCREAMING_SNAKE_CASE ).any() )
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = 20
UpperCamelCase = 4
UpperCamelCase = 0
UpperCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_SCREAMING_SNAKE_CASE )
# check that all scores are -inf except the bos_token_id score
UpperCamelCase = ids_tensor((batch_size, 1) , vocab_size=20 )
UpperCamelCase = 1
UpperCamelCase = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = logits_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] )  # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
UpperCamelCase = 3
UpperCamelCase = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = logits_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertFalse(jnp.isinf(_SCREAMING_SNAKE_CASE ).any() )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = 20
UpperCamelCase = 4
UpperCamelCase = 0
UpperCamelCase = 5
UpperCamelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE )
# check that all scores are -inf except the eos_token_id when max_length is reached
UpperCamelCase = ids_tensor((batch_size, 4) , vocab_size=20 )
UpperCamelCase = 4
UpperCamelCase = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = logits_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
UpperCamelCase = 3
UpperCamelCase = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = logits_processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
self.assertFalse(jnp.isinf(_SCREAMING_SNAKE_CASE ).any() )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = 4
UpperCamelCase = 10
UpperCamelCase = 15
UpperCamelCase = 2
UpperCamelCase = 1
UpperCamelCase = 15
# dummy input_ids and scores
UpperCamelCase = ids_tensor((batch_size, sequence_length) , _SCREAMING_SNAKE_CASE )
UpperCamelCase = input_ids.copy()
UpperCamelCase = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = scores.copy()
# instantiate all dist processors
UpperCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCamelCase = FlaxTopKLogitsWarper(3 )
UpperCamelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
UpperCamelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_SCREAMING_SNAKE_CASE )
UpperCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_SCREAMING_SNAKE_CASE )
UpperCamelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE )
UpperCamelCase = 10
# no processor list
UpperCamelCase = temp_dist_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
UpperCamelCase = top_k_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
UpperCamelCase = top_p_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
UpperCamelCase = min_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
UpperCamelCase = bos_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
UpperCamelCase = eos_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# with processor list
UpperCamelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
UpperCamelCase = processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
# scores should be equal
self.assertTrue(jnp.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def A__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = 4
UpperCamelCase = 10
UpperCamelCase = 15
UpperCamelCase = 2
UpperCamelCase = 1
UpperCamelCase = 15
# dummy input_ids and scores
UpperCamelCase = ids_tensor((batch_size, sequence_length) , _SCREAMING_SNAKE_CASE )
UpperCamelCase = input_ids.copy()
UpperCamelCase = self._get_uniform_logits(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = scores.copy()
# instantiate all dist processors
UpperCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCamelCase = FlaxTopKLogitsWarper(3 )
UpperCamelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
UpperCamelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_SCREAMING_SNAKE_CASE )
UpperCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_SCREAMING_SNAKE_CASE )
UpperCamelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE )
UpperCamelCase = 10
# no processor list
def run_no_processor_list(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase = temp_dist_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
UpperCamelCase = top_k_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
UpperCamelCase = top_p_warp(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
UpperCamelCase = min_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
UpperCamelCase = bos_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
UpperCamelCase = eos_dist_proc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
return scores
# with processor list
def run_processor_list(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
UpperCamelCase = processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cur_len=_SCREAMING_SNAKE_CASE )
return scores
UpperCamelCase = jax.jit(_SCREAMING_SNAKE_CASE )
UpperCamelCase = jax.jit(_SCREAMING_SNAKE_CASE )
UpperCamelCase = jitted_run_no_processor_list(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = jitted_run_processor_list(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# scores should be equal
self.assertTrue(jnp.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
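# Minimal numpy sketch of what the temperature warper tested above does:
# dividing logits by a temperature below 1 sharpens the softmax peak and a
# temperature above 1 flattens it. The logits here are hypothetical.
def _softmax(x):
    z = np.exp(x - x.max())
    return z / z.sum()
_logits = np.array([2.0, 1.0, 0.0])
assert _softmax(_logits / 0.5).max() > _softmax(_logits).max() > _softmax(_logits / 1.3).max()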
| 183 |
'''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def lowercase__ ( __UpperCamelCase )-> str:
return "".join(sorted(__UpperCamelCase ) )
def lowercase__ ( __UpperCamelCase )-> list[str]:
return word_by_signature[signature(__UpperCamelCase )]
SCREAMING_SNAKE_CASE__ = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
SCREAMING_SNAKE_CASE__ = sorted({word.strip().lower() for word in data.splitlines()})
SCREAMING_SNAKE_CASE__ = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams))
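# Minimal sketch of the signature-grouping idea above on a tiny in-memory
# word list (no words.txt needed): words sharing a sorted-letter signature
# are anagrams of one another.
_by_signature = collections.defaultdict(list)
for _w in ["listen", "silent", "enlist", "google"]:
    _by_signature["".join(sorted(_w))].append(_w)
assert _by_signature["eilnst"] == ["listen", "silent", "enlist"]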
| 183 | 1 |
UpperCamelCase__ = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
UpperCamelCase__ = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
UpperCamelCase__ = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 92 |
class a__ ( snake_case__ ):
pass
class a__ ( snake_case__ ):
pass
class a__ :
def __init__( self ):
"""simple docstring"""
__lowerCAmelCase = [
[],
[],
[],
]
def __SCREAMING_SNAKE_CASE( self , _A , _A ):
"""simple docstring"""
try:
if len(self.queues[priority] ) >= 1_0_0:
raise OverflowError("Maximum queue size is 100" )
self.queues[priority].append(_A )
except IndexError:
raise ValueError("Valid priorities are 0, 1, and 2" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError("All queues are empty" )
def __str__( self ):
"""simple docstring"""
return "\n".join(f"""Priority {i}: {q}""" for i, q in enumerate(self.queues ) )
class a__ :
def __init__( self ):
"""simple docstring"""
__lowerCAmelCase = []
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
if len(self.queue ) == 1_0_0:
raise OverFlowError("Maximum queue size is 100" )
self.queue.append(_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
if not self.queue:
raise UnderFlowError("The queue is empty" )
else:
__lowerCAmelCase = min(self.queue )
self.queue.remove(_A )
return data
def __str__( self ):
"""simple docstring"""
return str(self.queue )
def _a ( ):
__lowerCAmelCase = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 1_00 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 1_28 )
print(SCREAMING_SNAKE_CASE_ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(SCREAMING_SNAKE_CASE_ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def _a ( ):
__lowerCAmelCase = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(1_00 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(1_28 )
print(SCREAMING_SNAKE_CASE_ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(SCREAMING_SNAKE_CASE_ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
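# Design note: the standard library gives the same fixed-priority behavior
# via heapq by pushing (priority, insertion_order, data) tuples, where the
# insertion counter keeps dequeueing FIFO within a priority level.
import heapq
_heap, _counter = [], 0
for _priority, _data in [(0, 10), (2, 1), (1, 70), (0, 100)]:
    heapq.heappush(_heap, (_priority, _counter, _data))
    _counter += 1
assert [heapq.heappop(_heap)[2] for _ in range(len(_heap))] == [10, 100, 70, 1]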
| 92 | 1 |
"""simple docstring"""
def __lowerCAmelCase ( lowercase : int ) -> int:
"""simple docstring"""
snake_case : Dict = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def __lowerCAmelCase ( lowercase : int = 100 ) -> int:
"""simple docstring"""
snake_case : Dict = 1
snake_case : Union[str, Any] = 2
for i in range(2 , max_n + 1 ):
snake_case : List[Any] = pre_numerator
snake_case : Dict = 2 * i // 3 if i % 3 == 0 else 1
snake_case : Any = cur_numerator
snake_case : str = e_cont * pre_numerator + temp
return sum_digits(lowercase__ )
if __name__ == "__main__":
print(F'''{solution() = }''')
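# Cross-check sketch: the loop above tracks convergent numerators of e via
# the recurrence h_i = a_i * h_{i-1} + h_{i-2}; the same numerator can be
# recovered exactly with fractions.Fraction from the continued-fraction
# terms e = [2; 1, 2, 1, 1, 4, 1, 1, 6, 1, ...].
from fractions import Fraction
def _e_terms(n):
    terms, k = [2], 1
    while len(terms) < n:
        terms += [1, 2 * k, 1]
        k += 1
    return terms[:n]
def _convergent_numerator(n):
    terms = _e_terms(n)
    value = Fraction(terms[-1])
    for a in reversed(terms[:-1]):
        value = a + 1 / value
    return value.numerator
assert _convergent_numerator(10) == 1457  # digit sum 1 + 4 + 5 + 7 = 17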
| 351 |
"""simple docstring"""
def __lowerCAmelCase ( lowercase : int , lowercase : int , lowercase : list[list[int]] ) -> int:
"""simple docstring"""
def update_area_of_max_square(lowercase : int , lowercase : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
snake_case : Dict = update_area_of_max_square(lowercase , col + 1 )
snake_case : Tuple = update_area_of_max_square(row + 1 , col + 1 )
snake_case : Any = update_area_of_max_square(row + 1 , lowercase )
if mat[row][col]:
snake_case : List[Any] = 1 + min([right, diagonal, down] )
snake_case : Any = max(largest_square_area[0] , lowercase )
return sub_problem_sol
else:
return 0
snake_case : int = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def __lowerCAmelCase ( lowercase : int , lowercase : int , lowercase : list[list[int]] ) -> int:
"""simple docstring"""
def update_area_of_max_square_using_dp_array(
lowercase : int , lowercase : int , lowercase : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
snake_case : List[str] = update_area_of_max_square_using_dp_array(lowercase , col + 1 , lowercase )
snake_case : Optional[int] = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , lowercase )
snake_case : Optional[int] = update_area_of_max_square_using_dp_array(row + 1 , lowercase , lowercase )
if mat[row][col]:
snake_case : Dict = 1 + min([right, diagonal, down] )
snake_case : Union[str, Any] = max(largest_square_area[0] , lowercase )
snake_case : str = sub_problem_sol
return sub_problem_sol
else:
return 0
snake_case : Union[str, Any] = [0]
snake_case : int = [[-1] * cols for _ in range(lowercase )]
update_area_of_max_square_using_dp_array(0 , 0 , lowercase )
return largest_square_area[0]
def __lowerCAmelCase ( lowercase : int , lowercase : int , lowercase : list[list[int]] ) -> int:
"""simple docstring"""
snake_case : int = [[0] * (cols + 1) for _ in range(rows + 1 )]
snake_case : List[Any] = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
snake_case : Tuple = dp_array[row][col + 1]
snake_case : Any = dp_array[row + 1][col + 1]
snake_case : List[str] = dp_array[row + 1][col]
if mat[row][col] == 1:
snake_case : Optional[int] = 1 + min(lowercase , lowercase , lowercase )
snake_case : Tuple = max(dp_array[row][col] , lowercase )
else:
snake_case : List[Any] = 0
return largest_square_area
def __lowerCAmelCase ( lowercase : int , lowercase : int , lowercase : list[list[int]] ) -> int:
"""simple docstring"""
snake_case : Any = [0] * (cols + 1)
snake_case : Any = [0] * (cols + 1)
snake_case : Any = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
snake_case : Dict = current_row[col + 1]
snake_case : List[Any] = next_row[col + 1]
snake_case : Dict = next_row[col]
if mat[row][col] == 1:
snake_case : Union[str, Any] = 1 + min(lowercase , lowercase , lowercase )
snake_case : Optional[int] = max(current_row[col] , lowercase )
else:
snake_case : Optional[Any] = 0
snake_case : str = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
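# Worked sketch of the bottom-up recurrence above on a hypothetical 3x3
# matrix; note that despite the "area" naming, these functions return the
# side length of the largest all-ones square.
_mat = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
_rows = _cols = 3
_dp = [[0] * (_cols + 1) for _ in range(_rows + 1)]
_best = 0
for _r in range(_rows - 1, -1, -1):
    for _c in range(_cols - 1, -1, -1):
        if _mat[_r][_c]:
            # side of the largest square whose top-left corner is (_r, _c)
            _dp[_r][_c] = 1 + min(_dp[_r][_c + 1], _dp[_r + 1][_c + 1], _dp[_r + 1][_c])
            _best = max(_best, _dp[_r][_c])
assert _best == 2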
| 112 | 0 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def SCREAMING_SNAKE_CASE__ ( __a ):
snake_case_ : Any = []
snake_case_ : Tuple = []
snake_case_ : Tuple = []
for rt in rc.restypes:
snake_case_ : List[Any] = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
snake_case_ : Any = {name: i for i, name in enumerate(__snake_case )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
snake_case_ : List[str] = torch.tensor(
__snake_case , dtype=torch.intaa , device=protein['aatype'].device , )
snake_case_ : Tuple = torch.tensor(
__snake_case , dtype=torch.intaa , device=protein['aatype'].device , )
snake_case_ : List[str] = torch.tensor(
__snake_case , dtype=torch.floataa , device=protein['aatype'].device , )
snake_case_ : Any = protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
snake_case_ : Union[str, Any] = restype_atomaa_to_atomaa[protein_aatype]
snake_case_ : Union[str, Any] = restype_atomaa_mask[protein_aatype]
snake_case_ : Dict = residx_atomaa_mask
snake_case_ : int = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
snake_case_ : List[Any] = restype_atomaa_to_atomaa[protein_aatype]
snake_case_ : Union[str, Any] = residx_atomaa_to_atomaa.long()
# create the corresponding mask
snake_case_ : int = torch.zeros([21, 37] , dtype=torch.floataa , device=protein['aatype'].device )
for restype, restype_letter in enumerate(rc.restypes ):
snake_case_ : Optional[int] = rc.restype_atoa[restype_letter]
snake_case_ : Union[str, Any] = rc.residue_atoms[restype_name]
for atom_name in atom_names:
snake_case_ : Optional[int] = rc.atom_order[atom_name]
snake_case_ : List[str] = 1
snake_case_ : Tuple = restype_atomaa_mask[protein_aatype]
snake_case_ : str = residx_atomaa_mask
return protein
def SCREAMING_SNAKE_CASE__ ( __a ):
snake_case_ : int = tree_map(lambda __a : torch.tensor(__snake_case , device=batch['aatype'].device ) , __snake_case , np.ndarray )
snake_case_ : str = tensor_tree_map(lambda __a : np.array(__snake_case ) , make_atomaa_masks(__snake_case ) )
return out
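# Toy sketch of the gather pattern above: an index map plus a mask expands a
# per-residue 14-slot array into the 37-slot layout. The slot positions used
# here are hypothetical, not real residue_constants values.
_atom14 = np.arange(1, 15)               # values for the 14 atom slots
_to_atom14 = np.zeros(37, dtype=int)     # unmapped slots default to index 0
_to_atom14[[0, 1, 2, 4]] = [0, 1, 2, 3]  # pretend four atoms map over
_mask37 = np.zeros(37)
_mask37[[0, 1, 2, 4]] = 1.0
_atom37 = _atom14[_to_atom14] * _mask37  # gather, then zero unmapped slots
assert _atom37[4] == 4 and _atom37[3] == 0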
| 327 |
'''simple docstring'''
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
__snake_case : Tuple = {
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
}
def __lowerCamelCase ( __snake_case : List[str], __snake_case : List[Any], __snake_case : Optional[Any], __snake_case : List[Any], __snake_case : Any, __snake_case : Any ) -> int:
"""simple docstring"""
if got_ver is None or want_ver is None:
raise ValueError(
f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
f" reinstalling {pkg}." )
if not ops[op](version.parse(__snake_case ), version.parse(__snake_case ) ):
raise ImportError(
f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" )
def __lowerCamelCase ( __snake_case : str, __snake_case : Optional[str] = None ) -> None:
"""simple docstring"""
A__ : int =f"\n{hint}" if hint is not None else """"""
# non-versioned check
if re.match(r"""^[\w_\-\d]+$""", __snake_case ):
A__ , A__ , A__ : Union[str, Any] =requirement, None, None
else:
A__ : List[str] =re.findall(r"""^([^!=<>\s]+)([\s!=<>]{1,2}.+)""", __snake_case )
if not match:
raise ValueError(
"""requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"""
f" got {requirement}" )
A__ , A__ : Optional[int] =match[0]
A__ : Tuple =want_full.split(""",""" ) # there could be multiple requirements
A__ : Optional[Any] ={}
for w in want_range:
A__ : Tuple =re.findall(r"""^([\s!=<>]{1,2})(.+)""", __snake_case )
if not match:
raise ValueError(
"""requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"""
f" but got {requirement}" )
A__ , A__ : str =match[0]
A__ : Optional[Any] =want_ver
if op not in ops:
raise ValueError(f"{requirement}: need one of {list(ops.keys() )}, but got {op}" )
# special case
if pkg == "python":
A__ : Any =""".""".join([str(__snake_case ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(__snake_case, __snake_case, __snake_case, __snake_case, __snake_case, __snake_case )
return
# check if any version is installed
try:
A__ : Optional[int] =importlib.metadata.version(__snake_case )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f"The '{requirement}' distribution was not found and is required by this application. {hint}" )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(__snake_case, __snake_case, __snake_case, __snake_case, __snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : List[Any] ) -> int:
"""simple docstring"""
A__ : str ="""Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"""
return require_version(__snake_case, __snake_case )
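# Minimal sketch of why packaging.version matters for the checks above:
# versions must compare numerically per segment, not as plain strings.
assert version.parse("4.21.0") > version.parse("4.3")  # correct: 21 > 3
assert "4.21.0" < "4.3"  # naive string comparison gets it backwards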
| 134 | 0 |
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
UpperCAmelCase : Optional[Any] = "facebook/wmt19-en-de"
UpperCAmelCase : List[Any] = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
UpperCAmelCase : Optional[int] = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
UpperCAmelCase : Optional[int] = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
UpperCAmelCase : str = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
UpperCAmelCase : str = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
UpperCAmelCase : List[str] = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 360 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Dict = logging.get_logger(__name__)
UpperCAmelCase : Dict = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class __lowerCAmelCase ( UpperCamelCase__):
_lowercase : List[Any] = """mvp"""
_lowercase : Tuple = ["""past_key_values"""]
_lowercase : Dict = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , lowerCAmelCase__=5_0_2_6_7 , lowerCAmelCase__=1_0_2_4 , lowerCAmelCase__=1_2 , lowerCAmelCase__=4_0_9_6 , lowerCAmelCase__=1_6 , lowerCAmelCase__=1_2 , lowerCAmelCase__=4_0_9_6 , lowerCAmelCase__=1_6 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__="gelu" , lowerCAmelCase__=1_0_2_4 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.02 , lowerCAmelCase__=0.0 , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=2 , lowerCAmelCase__=True , lowerCAmelCase__=2 , lowerCAmelCase__=2 , lowerCAmelCase__=False , lowerCAmelCase__=1_0_0 , lowerCAmelCase__=8_0_0 , **lowerCAmelCase__ , ) -> str:
'''simple docstring'''
a__ : Dict =vocab_size
a__ : List[str] =max_position_embeddings
a__ : List[str] =d_model
a__ : Optional[Any] =encoder_ffn_dim
a__ : Dict =encoder_layers
a__ : List[str] =encoder_attention_heads
a__ : List[str] =decoder_ffn_dim
a__ : Tuple =decoder_layers
a__ : Tuple =decoder_attention_heads
a__ : Any =dropout
a__ : str =attention_dropout
a__ : str =activation_dropout
a__ : Optional[int] =activation_function
a__ : Union[str, Any] =init_std
a__ : Dict =encoder_layerdrop
a__ : List[str] =decoder_layerdrop
a__ : Union[str, Any] =classifier_dropout
a__ : Union[str, Any] =use_cache
a__ : Optional[Any] =encoder_layers
a__ : List[str] =scale_embedding # scale factor will be sqrt(d_model) if True
a__ : int =use_prompt
a__ : Union[str, Any] =prompt_length
a__ : Dict =prompt_mid_dim
super().__init__(
pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , decoder_start_token_id=lowerCAmelCase__ , forced_eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , lowerCAmelCase__ ):
a__ : Tuple =self.bos_token_id
warnings.warn(
F'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
"The config can simply be saved and uploaded again to be fixed." )
| 148 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 54 |
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
lowerCAmelCase__ : Dict = logging.get_logger(__name__)
enable_full_determinism()
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = UNetaDModel
__lowerCamelCase = """sample"""
@property
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : Optional[Any] = 4
snake_case__ : List[Any] = 3
snake_case__ : int = (32, 32)
snake_case__ : List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
snake_case__ : str = torch.tensor([10] ).to(__UpperCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def __a ( self ) -> Optional[int]:
'''simple docstring'''
return (3, 32, 32)
@property
def __a ( self ) -> Optional[int]:
'''simple docstring'''
return (3, 32, 32)
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : Union[str, Any] = {
'block_out_channels': (32, 64),
'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
'attention_head_dim': 3,
'out_channels': 3,
'in_channels': 3,
'layers_per_block': 2,
'sample_size': 32,
}
snake_case__ : List[Any] = self.dummy_input
return init_dict, inputs_dict
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = UNetaDModel
__lowerCamelCase = """sample"""
@property
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : List[Any] = 4
snake_case__ : List[Any] = 4
snake_case__ : List[str] = (32, 32)
snake_case__ : str = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
snake_case__ : int = torch.tensor([10] ).to(__UpperCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def __a ( self ) -> int:
'''simple docstring'''
return (4, 32, 32)
@property
def __a ( self ) -> str:
'''simple docstring'''
return (4, 32, 32)
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : Union[str, Any] = {
'sample_size': 32,
'in_channels': 4,
'out_channels': 4,
'layers_per_block': 2,
'block_out_channels': (32, 64),
'attention_head_dim': 32,
'down_block_types': ('DownBlock2D', 'DownBlock2D'),
'up_block_types': ('UpBlock2D', 'UpBlock2D'),
}
snake_case__ : List[Any] = self.dummy_input
return init_dict, inputs_dict
def __a ( self ) -> str:
'''simple docstring'''
snake_case__ , snake_case__ : Optional[int] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(__UpperCamelCase )
snake_case__ : List[Any] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ , snake_case__ : List[str] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCamelCase )
model.to(__UpperCamelCase )
snake_case__ : Union[str, Any] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def __a ( self ) -> str:
'''simple docstring'''
snake_case__ , snake_case__ : List[str] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCamelCase )
model_accelerate.to(__UpperCamelCase )
model_accelerate.eval()
snake_case__ : Tuple = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
snake_case__ : Union[str, Any] = noise.to(__UpperCamelCase )
snake_case__ : List[str] = torch.tensor([10] * noise.shape[0] ).to(__UpperCamelCase )
snake_case__ : str = model_accelerate(__UpperCamelCase , __UpperCamelCase )['sample']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
snake_case__ , snake_case__ : Union[str, Any] = UNetaDModel.from_pretrained(
'fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCamelCase , low_cpu_mem_usage=__UpperCamelCase )
model_normal_load.to(__UpperCamelCase )
model_normal_load.eval()
snake_case__ : List[str] = model_normal_load(__UpperCamelCase , __UpperCamelCase )['sample']
assert torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1E-3 )
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : List[Any] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' )
model.eval()
model.to(__UpperCamelCase )
snake_case__ : Any = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
snake_case__ : List[Any] = noise.to(__UpperCamelCase )
snake_case__ : List[str] = torch.tensor([10] * noise.shape[0] ).to(__UpperCamelCase )
with torch.no_grad():
snake_case__ : List[str] = model(__UpperCamelCase , __UpperCamelCase ).sample
snake_case__ : Tuple = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
snake_case__ : int = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
# fmt: on
self.assertTrue(torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1E-3 ) )
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = UNetaDModel
__lowerCamelCase = """sample"""
@property
def __a ( self , __UpperCamelCase=(32, 32) ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Dict = 4
snake_case__ : Dict = 3
snake_case__ : str = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
snake_case__ : List[str] = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=__UpperCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def __a ( self ) -> Optional[int]:
'''simple docstring'''
return (3, 32, 32)
@property
def __a ( self ) -> int:
'''simple docstring'''
return (3, 32, 32)
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : Optional[Any] = {
'block_out_channels': [32, 64, 64, 64],
'in_channels': 3,
'layers_per_block': 1,
'out_channels': 3,
'time_embedding_type': 'fourier',
'norm_eps': 1E-6,
'mid_block_scale_factor': math.sqrt(2.0 ),
'norm_num_groups': None,
'down_block_types': [
'SkipDownBlock2D',
'AttnSkipDownBlock2D',
'SkipDownBlock2D',
'SkipDownBlock2D',
],
'up_block_types': [
'SkipUpBlock2D',
'SkipUpBlock2D',
'AttnSkipUpBlock2D',
'SkipUpBlock2D',
],
}
snake_case__ : str = self.dummy_input
return init_dict, inputs_dict
@slow
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case__ , snake_case__ : str = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' , output_loading_info=__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(__UpperCamelCase )
snake_case__ : Dict = self.dummy_input
snake_case__ : Union[str, Any] = floats_tensor((4, 3) + (256, 256) ).to(__UpperCamelCase )
snake_case__ : List[Any] = noise
snake_case__ : Any = model(**__UpperCamelCase )
assert image is not None, "Make sure output is not None"
@slow
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : str = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' )
model.to(__UpperCamelCase )
snake_case__ : Optional[Any] = 4
snake_case__ : str = 3
snake_case__ : List[Any] = (256, 256)
snake_case__ : Dict = torch.ones((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
snake_case__ : int = torch.tensor(batch_size * [1E-4] ).to(__UpperCamelCase )
with torch.no_grad():
snake_case__ : str = model(__UpperCamelCase , __UpperCamelCase ).sample
snake_case__ : Optional[int] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
snake_case__ : Optional[int] = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
# fmt: on
self.assertTrue(torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1E-2 ) )
def __a ( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : Dict = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' )
model.to(__UpperCamelCase )
snake_case__ : Dict = 4
snake_case__ : List[str] = 3
snake_case__ : Union[str, Any] = (32, 32)
snake_case__ : Optional[int] = torch.ones((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
snake_case__ : int = torch.tensor(batch_size * [1E-4] ).to(__UpperCamelCase )
with torch.no_grad():
snake_case__ : Tuple = model(__UpperCamelCase , __UpperCamelCase ).sample
snake_case__ : List[str] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
snake_case__ : Optional[int] = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
# fmt: on
self.assertTrue(torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1E-2 ) )
def __a ( self ) -> Tuple:
'''simple docstring'''
pass
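# Minimal sketch of the tolerance comparison these slow tests rely on:
# allclose with a relative tolerance accepts small per-element drift.
_a = torch.tensor([1.0000, 2.0000])
_b = torch.tensor([1.0005, 1.9995])
assert torch.allclose(_a, _b, rtol=1e-3)
assert not torch.allclose(_a, _b, rtol=1e-5)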
| 143 | 0 |
'''simple docstring'''
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
UpperCAmelCase__ = len(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = []
for i in range(len(SCREAMING_SNAKE_CASE__ ) - pat_len + 1 ):
UpperCAmelCase__ = True
for j in range(SCREAMING_SNAKE_CASE__ ):
if s[i + j] != pattern[j]:
UpperCAmelCase__ = False
break
if match_found:
position.append(SCREAMING_SNAKE_CASE__ )
return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
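# A plainly-named, runnable sketch of the same naive left-to-right scan,
# which slides the pattern one position at a time (O(len(s) * len(pattern))).
def _naive_search(text, pattern):
    hits = []
    for i in range(len(text) - len(pattern) + 1):
        if text[i : i + len(pattern)] == pattern:
            hits.append(i)
    return hits
assert _naive_search("ABCDEFG", "DE") == [3]
assert _naive_search("ABAAABCDBBABCDDEBCABC", "ABC") == [4, 10, 18]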
| 61 |
'''simple docstring'''
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : bool = False ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase__ = F'''Expected string as input, found {type(SCREAMING_SNAKE_CASE__ )}'''
raise ValueError(SCREAMING_SNAKE_CASE__ )
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase__ = F'''Expected boolean as use_pascal parameter, found {type(SCREAMING_SNAKE_CASE__ )}'''
raise ValueError(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = input_str.split("""_""" )
UpperCAmelCase__ = 0 if use_pascal else 1
UpperCAmelCase__ = words[start_index:]
UpperCAmelCase__ = [word[0].upper() + word[1:] for word in words_to_capitalize]
UpperCAmelCase__ = """""" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
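# A plainly-named, runnable sketch of the same conversion.
def _snake_to_camel(s, use_pascal=False):
    words = s.split("_")
    start = 0 if use_pascal else 1
    head = "" if use_pascal else words[0]
    return head + "".join(w[0].upper() + w[1:] for w in words[start:])
assert _snake_to_camel("some_random_string") == "someRandomString"
assert _snake_to_camel("some_random_string", use_pascal=True) == "SomeRandomString"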
| 61 | 1 |
"""simple docstring"""
from __future__ import annotations
class a :
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ) -> List[Any]:
lowerCamelCase_ , lowerCamelCase_ = text, pattern
lowerCamelCase_ , lowerCamelCase_ = len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : str ) -> int:
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def UpperCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : int ) -> int:
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def UpperCamelCase ( self : Optional[Any] ) -> list[int]:
# searches pattern in text and returns index positions
lowerCamelCase_ = []
for i in range(self.textLen - self.patLen + 1 ):
lowerCamelCase_ = self.mismatch_in_text(__SCREAMING_SNAKE_CASE )
if mismatch_index == -1:
positions.append(__SCREAMING_SNAKE_CASE )
else:
lowerCamelCase_ = self.match_in_pattern(self.text[mismatch_index] )
lowerCamelCase_ = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
_SCREAMING_SNAKE_CASE : Dict = '''ABAABA'''
_SCREAMING_SNAKE_CASE : int = '''AB'''
_SCREAMING_SNAKE_CASE : Union[str, Any] = BoyerMooreSearch(text, pattern)
_SCREAMING_SNAKE_CASE : Optional[Any] = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
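# A plainly-named sketch of the bad-character heuristic; unlike the class
# above, which re-checks every window, this variant actually applies the
# computed shift after a mismatch.
def _bad_character_search(text, pattern):
    n, m = len(text), len(pattern)
    hits, i = [], 0
    while i <= n - m:
        j = m - 1  # compare right-to-left inside the current window
        while j >= 0 and pattern[j] == text[i + j]:
            j -= 1
        if j < 0:
            hits.append(i)
            i += 1
        else:
            # align the mismatching text char with its last occurrence to the
            # left of j in the pattern; skip the whole prefix if it is absent
            last = pattern.rfind(text[i + j], 0, j)
            i += max(1, j - last)
    return hits
assert _bad_character_search("ABAABA", "AB") == [0, 3]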
| 183 |
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class a ( __snake_case ):
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : str = "▁" , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Union[str, AddedToken] = "<unk>" , __SCREAMING_SNAKE_CASE : Union[str, AddedToken] = "</s>" , __SCREAMING_SNAKE_CASE : Union[str, AddedToken] = "<pad>" , ) -> Optional[int]:
lowerCamelCase_ = {
'pad': {'id': 0, 'token': pad_token},
'eos': {'id': 1, 'token': eos_token},
'unk': {'id': 2, 'token': unk_token},
}
lowerCamelCase_ = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
lowerCamelCase_ = token_dict['token']
lowerCamelCase_ = Tokenizer(Unigram() )
lowerCamelCase_ = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(' {2,}' ) , ' ' ),
normalizers.Lowercase(),
] )
lowerCamelCase_ = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE ),
pre_tokenizers.Digits(individual_digits=__SCREAMING_SNAKE_CASE ),
pre_tokenizers.Punctuation(),
] )
lowerCamelCase_ = decoders.Metaspace(replacement=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = TemplateProcessing(
single=F'''$A {self.special_tokens["eos"]["token"]}''' , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
lowerCamelCase_ = {
'model': 'SentencePieceUnigram',
'replacement': replacement,
'add_prefix_space': add_prefix_space,
}
super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : int = 8000 , __SCREAMING_SNAKE_CASE : bool = True , ) -> Tuple:
lowerCamelCase_ = trainers.UnigramTrainer(
vocab_size=__SCREAMING_SNAKE_CASE , special_tokens=self.special_tokens_list , show_progress=__SCREAMING_SNAKE_CASE , )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowerCamelCase_ = [files]
self._tokenizer.train(__SCREAMING_SNAKE_CASE , trainer=__SCREAMING_SNAKE_CASE )
self.add_unk_id()
def UpperCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[Iterator[str], Iterator[Iterator[str]]] , __SCREAMING_SNAKE_CASE : int = 8000 , __SCREAMING_SNAKE_CASE : bool = True , ) -> str:
lowerCamelCase_ = trainers.UnigramTrainer(
vocab_size=__SCREAMING_SNAKE_CASE , special_tokens=self.special_tokens_list , show_progress=__SCREAMING_SNAKE_CASE , )
self._tokenizer.train_from_iterator(__SCREAMING_SNAKE_CASE , trainer=__SCREAMING_SNAKE_CASE )
self.add_unk_id()
def UpperCamelCase ( self : List[Any] ) -> Dict:
lowerCamelCase_ = json.loads(self._tokenizer.to_str() )
lowerCamelCase_ = self.special_tokens['unk']['id']
lowerCamelCase_ = Tokenizer.from_str(json.dumps(__SCREAMING_SNAKE_CASE ) )
| 183 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class A_ :
def __init__( self: Union[str, Any] ,__lowerCAmelCase: int ):
'''simple docstring'''
_lowerCamelCase : List[Any] = value
_lowerCamelCase : Node | None = None
_lowerCamelCase : Node | None = None
class A_ :
def __init__( self: Tuple ,__lowerCAmelCase: Node ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = tree
def _lowercase ( self: int ,__lowerCAmelCase: Node | None ):
'''simple docstring'''
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self: Any ):
'''simple docstring'''
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
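# A plainly-named, runnable sketch of the same depth-first sum.
class _Node:
    def __init__(self, value):
        self.value, self.left, self.right = value, None, None
def _tree_sum(node):
    if node is None:
        return 0
    return node.value + _tree_sum(node.left) + _tree_sum(node.right)
_root = _Node(1)
_root.left, _root.right = _Node(2), _Node(3)
assert _tree_sum(_root) == 6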
| 362 |
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """
    Return the writer batch size to pass to a ParquetWriter so that Parquet row
    groups stay reasonably small for media-heavy datasets, or None to keep the
    default.
    """
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size


class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Write the dataset to the Parquet binary file-like object `file_obj`, returning the number of bytes written."""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
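# Illustrative round-trip sketch (added; not part of the original module). The
# classes above back the public `datasets` API, so the usual entry points are
# `Dataset.to_parquet` and `Dataset.from_parquet`; the file name is a placeholder.
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
#   ds.to_parquet("demo.parquet")                    # routed through ParquetDatasetWriter.write()
#   reloaded = Dataset.from_parquet("demo.parquet")  # routed through ParquetDatasetReader.read()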
| 340 | 0 |
"""
2-approximation for the minimum vertex cover via maximal matching: repeatedly pick
an arbitrary edge, add both endpoints to the cover, and delete every edge that
touches either endpoint.
"""


def matching_min_vertex_cover(graph: dict) -> set:
    """Return a set of vertices covering every edge of the adjacency-list graph."""
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return the set of (from_node, to_node) edge tuples of the graph."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 112 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Minimal stand-in used when PIL is unavailable."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_small_model_tf(self):
        pass
    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"""score""": 0.72_35, """label""": """cat""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}},
{"""score""": 0.72_18, """label""": """remote""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}},
{"""score""": 0.71_84, """label""": """couch""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}},
{"""score""": 0.67_48, """label""": """remote""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}},
{"""score""": 0.66_56, """label""": """cat""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}},
{"""score""": 0.66_14, """label""": """couch""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}},
{"""score""": 0.64_56, """label""": """remote""", """box""": {"""xmin""": 4_9_4, """ymin""": 1_0_5, """xmax""": 5_2_1, """ymax""": 1_2_7}},
{"""score""": 0.6_42, """label""": """remote""", """box""": {"""xmin""": 6_7, """ymin""": 2_7_4, """xmax""": 9_3, """ymax""": 2_9_7}},
{"""score""": 0.64_19, """label""": """cat""", """box""": {"""xmin""": 4_9_4, """ymin""": 1_0_5, """xmax""": 5_2_1, """ymax""": 1_2_7}},
] , )
        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{"""score""": 0.72_35, """label""": """cat""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}},
{"""score""": 0.72_18, """label""": """remote""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}},
{"""score""": 0.71_84, """label""": """couch""", """box""": {"""xmin""": 2_0_4, """ymin""": 1_6_7, """xmax""": 2_3_2, """ymax""": 1_9_0}},
{"""score""": 0.67_48, """label""": """remote""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}},
{"""score""": 0.66_56, """label""": """cat""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}},
{"""score""": 0.66_14, """label""": """couch""", """box""": {"""xmin""": 5_7_1, """ymin""": 8_3, """xmax""": 5_9_8, """ymax""": 1_0_3}},
{"""score""": 0.64_56, """label""": """remote""", """box""": {"""xmin""": 4_9_4, """ymin""": 1_0_5, """xmax""": 5_2_1, """ymax""": 1_2_7}},
{"""score""": 0.6_42, """label""": """remote""", """box""": {"""xmin""": 6_7, """ymin""": 2_7_4, """xmax""": 9_3, """ymax""": 2_9_7}},
{"""score""": 0.64_19, """label""": """cat""", """box""": {"""xmin""": 4_9_4, """ymin""": 1_0_5, """xmax""": 5_2_1, """ymax""": 1_2_7}},
]
] , )
    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"""score""": 0.28_68, """label""": """cat""", """box""": {"""xmin""": 3_2_4, """ymin""": 2_0, """xmax""": 6_4_0, """ymax""": 3_7_3}},
{"""score""": 0.2_77, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_2, """xmax""": 1_7_7, """ymax""": 1_1_5}},
{"""score""": 0.25_37, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 5_5, """xmax""": 3_1_5, """ymax""": 4_7_2}},
{"""score""": 0.14_74, """label""": """remote""", """box""": {"""xmin""": 3_3_5, """ymin""": 7_4, """xmax""": 3_7_1, """ymax""": 1_8_7}},
{"""score""": 0.12_08, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 6_4_2, """ymax""": 4_7_6}},
] , )
        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ],
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{"""score""": 0.28_68, """label""": """cat""", """box""": {"""xmin""": 3_2_4, """ymin""": 2_0, """xmax""": 6_4_0, """ymax""": 3_7_3}},
{"""score""": 0.2_77, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_2, """xmax""": 1_7_7, """ymax""": 1_1_5}},
{"""score""": 0.25_37, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 5_5, """xmax""": 3_1_5, """ymax""": 4_7_2}},
{"""score""": 0.14_74, """label""": """remote""", """box""": {"""xmin""": 3_3_5, """ymin""": 7_4, """xmax""": 3_7_1, """ymax""": 1_8_7}},
{"""score""": 0.12_08, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 6_4_2, """ymax""": 4_7_6}},
],
[
{"""score""": 0.28_68, """label""": """cat""", """box""": {"""xmin""": 3_2_4, """ymin""": 2_0, """xmax""": 6_4_0, """ymax""": 3_7_3}},
{"""score""": 0.2_77, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_2, """xmax""": 1_7_7, """ymax""": 1_1_5}},
{"""score""": 0.25_37, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 5_5, """xmax""": 3_1_5, """ymax""": 4_7_2}},
{"""score""": 0.14_74, """label""": """remote""", """box""": {"""xmin""": 3_3_5, """ymin""": 7_4, """xmax""": 3_7_1, """ymax""": 1_8_7}},
{"""score""": 0.12_08, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 6_4_2, """ymax""": 4_7_6}},
],
] , )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        pass
    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"""score""": 0.28_68, """label""": """cat""", """box""": {"""xmin""": 3_2_4, """ymin""": 2_0, """xmax""": 6_4_0, """ymax""": 3_7_3}},
{"""score""": 0.2_77, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_2, """xmax""": 1_7_7, """ymax""": 1_1_5}},
{"""score""": 0.25_37, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 5_5, """xmax""": 3_1_5, """ymax""": 4_7_2}},
] , )
    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"""score""": 0.28_68, """label""": """cat""", """box""": {"""xmin""": 3_2_4, """ymin""": 2_0, """xmax""": 6_4_0, """ymax""": 3_7_3}},
{"""score""": 0.2_77, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_2, """xmax""": 1_7_7, """ymax""": 1_1_5}},
] , )
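# Illustrative pipeline call (added; not part of the test suite), mirroring the
# slow tests above outside unittest. The checkpoint is whatever the pipeline
# resolves by default, and the URL is the same COCO sample the tests use.
#
#   from transformers import pipeline
#   detector = pipeline("zero-shot-object-detection")
#   detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote", "couch"],
#   )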
| 112 | 1 |
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    def __init__(
        self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3,
        is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10, initializer_range=0.02, num_labels=3,
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, pixel_values, labels
    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    # We need to override this test because BeiT's forward signature differs from text models.
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    # We need to override this test because BeiT expects pixel_values instead of input_ids.
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values

        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)

        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
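# Illustrative inference sketch (added; not part of the test suite), condensing
# the integration tests above into a single classification call:
#
#   processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
#   model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
#   inputs = processor(images=prepare_img(), return_tensors="np")
#   predicted_class = model(**inputs).logits.argmax(-1).item()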
| 363 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
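# Example invocation (added; the script name and all paths are illustrative
# placeholders):
#
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_cased_L-12_H-768_A-12/xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-pytorch \
#       --finetuning_task sst-2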
| 82 | 0 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):

    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")

        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
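# Illustrative usage sketch (added; not part of the original module). These
# arguments are normally consumed by a TensorFlow benchmark runner; the model
# name and sizes below are placeholders.
#
#   args = TensorFlowBenchmarkArguments(
#       models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[32]
#   )
#   print(args.is_gpu, args.n_gpu)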
| 73 |
"""simple docstring"""
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
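# Example invocation (added; the script name and paths are illustrative
# placeholders):
#
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./t5_checkpoint/model.ckpt \
#       --config_file ./t5_checkpoint/config.json \
#       --pytorch_dump_path ./t5-pytorch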
| 148 | 0 |
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
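# Illustrative usage sketch (added; not part of the test suite): outside the
# tests, stopping criteria are usually handed to `generate`. "gpt2" is just a
# placeholder checkpoint.
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   inputs = tok("Hello", return_tensors="pt")
#   model.generate(
#       **inputs, stopping_criteria=StoppingCriteriaList([MaxTimeCriteria(max_time=1.0)])
#   )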
| 353 |
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
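# Illustrative standalone loop (added; not part of the test suite), condensing
# the pattern the tests above exercise. `initial_latents` and `denoiser` are
# placeholders for your noisy sample and noise-prediction model.
#
#   scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100)
#   scheduler.set_timesteps(10)
#   sample = initial_latents * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = denoiser(model_input, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample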
| 237 | 0 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
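# Illustrative dual-guided call (added; not part of the test suite), mirroring
# the nightly tests above; the local image path is a placeholder.
#
#   pipe = VersatileDiffusionPipeline.from_pretrained(
#       "shi-labs/versatile-diffusion", torch_dtype=torch.float16
#   ).to("cuda")
#   out = pipe.dual_guided(
#       prompt="cyberpunk 2077",
#       image=load_image("benz.jpg"),
#       text_to_image_strength=0.75,
#       num_inference_steps=50,
#   ).images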
| 61 |
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
        hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32,
        eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class A_ (lowercase__ ,unittest.TestCase ,lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = True
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
SCREAMING_SNAKE_CASE__ : List[Any] = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = FlaxBlenderbotSmallModelTester(self )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowercase_ , lowercase_ , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowercase_ , lowercase_ , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ : List[Any] = self._prepare_for_class(lowercase_ , lowercase_ )
UpperCAmelCase_ : Dict = model_class(lowercase_ )
@jax.jit
def encode_jitted(lowercase_ , lowercase_=None , **lowercase_ ):
return model.encode(input_ids=lowercase_ , attention_mask=lowercase_ )
with self.subTest("JIT Enabled" ):
UpperCAmelCase_ : List[Any] = encode_jitted(**lowercase_ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
UpperCAmelCase_ : Optional[Any] = encode_jitted(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
for jitted_output, output in zip(lowercase_ , lowercase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ : Optional[int] = model_class(lowercase_ )
UpperCAmelCase_ : Tuple = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
UpperCAmelCase_ : int = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(lowercase_ , lowercase_ , lowercase_ ):
return model.decode(
decoder_input_ids=lowercase_ , decoder_attention_mask=lowercase_ , encoder_outputs=lowercase_ , )
with self.subTest("JIT Enabled" ):
UpperCAmelCase_ : str = decode_jitted(**lowercase_ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
UpperCAmelCase_ : List[Any] = decode_jitted(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
for jitted_output, output in zip(lowercase_ , lowercase_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = model_class_name.from_pretrained("facebook/blenderbot_small-90M" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
UpperCAmelCase_ : List[str] = np.ones((1, 1) ) * model.config.eos_token_id
UpperCAmelCase_ : Optional[int] = model(lowercase_ )
self.assertIsNotNone(lowercase_ )
| 61 | 1 |
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
__a = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : Any ):
'''simple docstring'''
__UpperCAmelCase : Any = hf_hub_download(
repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
__UpperCAmelCase : Optional[Any] = VideoClassificationPipeline(model=UpperCamelCase , image_processor=UpperCamelCase , top_k=2 )
__UpperCAmelCase : Any = [
example_video_filepath,
"""https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""",
]
return video_classifier, examples
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : List[str] , UpperCamelCase : Dict ):
'''simple docstring'''
for example in examples:
__UpperCAmelCase : Tuple = video_classifier(UpperCamelCase )
self.assertEqual(
UpperCamelCase , [
{"""score""": ANY(UpperCamelCase ), """label""": ANY(UpperCamelCase )},
{"""score""": ANY(UpperCamelCase ), """label""": ANY(UpperCamelCase )},
] , )
@require_torch
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Tuple = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification"""
__UpperCAmelCase : Any = VideoMAEFeatureExtractor(
size={"""shortest_edge""": 10} , crop_size={"""height""": 10, """width""": 10} )
__UpperCAmelCase : List[str] = pipeline(
"""video-classification""" , model=UpperCamelCase , feature_extractor=UpperCamelCase , frame_sampling_rate=4 )
__UpperCAmelCase : int = hf_hub_download(repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
__UpperCAmelCase : Optional[int] = video_classifier(UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}] , )
__UpperCAmelCase : Any = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
[{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}],
[{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}],
] , )
@require_tf
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
pass
| 358 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class lowerCamelCase__ :
"""simple docstring"""
@staticmethod
def lowerCamelCase__ ( *UpperCamelCase : Optional[Any] , **UpperCamelCase : Dict ):
'''simple docstring'''
pass
def lowerCamelCase ( _UpperCamelCase : Image ) -> str:
'''simple docstring'''
__UpperCAmelCase : Tuple = hashlib.mda(image.tobytes() )
return m.hexdigest()[:1_0]
def lowerCamelCase ( _UpperCamelCase : Image ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Tuple = np.array(_UpperCamelCase )
__UpperCAmelCase : List[Any] = npimg.shape
return {"hash": hashimage(_UpperCamelCase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
__a = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
__a = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = MaskGenerationPipeline(model=UpperCamelCase , image_processor=UpperCamelCase )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : List[Any] ):
'''simple docstring'''
pass
@require_tf
@unittest.skip("""Image segmentation not implemented in TF""" )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
pass
@slow
@require_torch
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Tuple = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" )
__UpperCAmelCase : Any = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=256 )
# Shortening by hashing
__UpperCAmelCase : int = []
for i, o in enumerate(outputs["""masks"""] ):
new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.021},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9967},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.993},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9909},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9879},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, """scores""": 0.9834},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9716},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9612},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9599},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9552},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9532},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9516},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9499},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9483},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9464},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.943},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.943},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9408},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9335},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9326},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9262},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8999},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8986},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8984},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8873},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8871}
] , )
# fmt: on
@require_torch
@slow
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Any = """facebook/sam-vit-huge"""
__UpperCAmelCase : str = pipeline("""mask-generation""" , model=UpperCamelCase )
__UpperCAmelCase : int = image_segmenter(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
__UpperCAmelCase : Dict = []
for i, o in enumerate(outputs["""masks"""] ):
new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0210},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053},
] , )
| 320 | 0 |
from __future__ import annotations
def __lowerCamelCase ( UpperCAmelCase_ : list[int] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : list[list[str]] , UpperCAmelCase_ : int , ):
"""simple docstring"""
a :Optional[int] = len(UpperCamelCase_ )
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(UpperCamelCase_ ):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , UpperCamelCase_ , UpperCamelCase_ , )
def __lowerCamelCase ( UpperCAmelCase_ : int ):
"""simple docstring"""
a :List[Any] = []
depth_first_search([] , [] , [] , UpperCamelCase_ , UpperCamelCase_ )
# Print all the boards
for board in boards:
for column in board:
print(UpperCamelCase_ )
print('''''' )
print(len(UpperCamelCase_ ) , '''solutions were found.''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 94 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 340 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class _UpperCAmelCase:
lowercase__ = BlenderbotConfig
lowercase__ = {}
lowercase__ = 'gelu'
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=False , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a=0.1 , __a=0.1 , __a=20 , __a=2 , __a=1 , __a=0 , ) -> str:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = eos_token_id
_UpperCamelCase = pad_token_id
_UpperCamelCase = bos_token_id
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
_UpperCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
_UpperCamelCase = tf.concat([input_ids, eos_tensor] , axis=1)
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_UpperCamelCase = prepare_blenderbot_inputs_dict(__a , __a , __a)
return config, inputs_dict
def UpperCAmelCase ( self , __a , __a) -> str:
'''simple docstring'''
_UpperCamelCase = TFBlenderbotModel(config=__a).get_decoder()
_UpperCamelCase = inputs_dict['''input_ids''']
_UpperCamelCase = input_ids[:1, :]
_UpperCamelCase = inputs_dict['''attention_mask'''][:1, :]
_UpperCamelCase = inputs_dict['''head_mask''']
_UpperCamelCase = 1
# first forward pass
_UpperCamelCase = model(__a , attention_mask=__a , head_mask=__a , use_cache=__a)
_UpperCamelCase , _UpperCamelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size)
_UpperCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta)
# append to next input_ids and
_UpperCamelCase = tf.concat([input_ids, next_tokens] , axis=-1)
_UpperCamelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1)
_UpperCamelCase = model(__a , attention_mask=__a)[0]
_UpperCamelCase = model(__a , attention_mask=__a , past_key_values=__a)[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
# select random slice
_UpperCamelCase = int(ids_tensor((1,) , output_from_past.shape[-1]))
_UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx]
_UpperCamelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__a , __a , rtol=1e-3)
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case=None, __snake_case=None, __snake_case=None, __snake_case=None, __snake_case=None, ) -> Optional[Any]:
"""simple docstring"""
if attention_mask is None:
_UpperCamelCase = tf.cast(tf.math.not_equal(__snake_case, config.pad_token_id ), tf.inta )
if decoder_attention_mask is None:
_UpperCamelCase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ),
], axis=-1, )
if head_mask is None:
_UpperCamelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase__ = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
lowercase__ = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
lowercase__ = (
{
'conversational': TFBlenderbotForConditionalGeneration,
'feature-extraction': TFBlenderbotModel,
'summarization': TFBlenderbotForConditionalGeneration,
'text2text-generation': TFBlenderbotForConditionalGeneration,
'translation': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowercase__ = True
lowercase__ = False
lowercase__ = False
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = TFBlenderbotModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__a)
@require_tokenizers
@require_tf
class _UpperCAmelCase( unittest.TestCase ):
lowercase__ = ['My friends are cool but they eat too many carbs.']
lowercase__ = 'facebook/blenderbot-400M-distill'
@cached_property
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
return BlenderbotTokenizer.from_pretrained(self.model_name)
@cached_property
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
return model
@slow
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.tokenizer(self.src_text , return_tensors='''tf''')
_UpperCamelCase = self.model.generate(
model_inputs.input_ids , )
_UpperCamelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__a)[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 100 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = 'bert'
def __init__( self , __a=3_05_22 , __a=7_68 , __a=12 , __a=12 , __a=30_72 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=2 , __a=0.02 , __a=1e-12 , __a=0 , __a="absolute" , __a=True , __a=None , **__a , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(pad_token_id=__a , **__a)
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_act
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = position_embedding_type
_UpperCamelCase = use_cache
_UpperCamelCase = classifier_dropout
class _UpperCAmelCase( lowerCamelCase ):
@property
def UpperCAmelCase ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
_UpperCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_UpperCamelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
])
| 100 | 1 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
_snake_case = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
_lowerCAmelCase : Tuple = self.transformer_dir
shutil.copy(
os.path.join(_snake_case, "src/transformers/models/bert/modeling_bert.py"), os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"), )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = "src/transformers"
shutil.rmtree(self.transformer_dir)
def snake_case__ ( self, __a, __a, __a, __a=None):
'''simple docstring'''
_lowerCAmelCase : List[Any] = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
_lowerCAmelCase : Union[str, Any] = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
_lowerCAmelCase : str = black.Mode(target_versions={black.TargetVersion.PYaa}, line_length=119)
_lowerCAmelCase : Dict = black.format_str(_snake_case, mode=_snake_case)
_lowerCAmelCase : Optional[int] = os.path.join(self.transformer_dir, "new_code.py")
with open(_snake_case, "w", newline="\n") as f:
f.write(_snake_case)
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_snake_case)) == 0)
else:
check_copies.is_copy_consistent(f.name, overwrite=_snake_case)
with open(_snake_case, "r") as f:
self.assertTrue(f.read(), _snake_case)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
self.assertEqual(_snake_case, _snake_case)
def snake_case__ ( self):
'''simple docstring'''
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead", "BertLMPredictionHead", REFERENCE_CODE + "\n", )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead", "BertLMPredictionHead", _snake_case, )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel", "TestModelLMPredictionHead", re.sub("Bert", "TestModel", _snake_case), )
# Copy consistency with a really long name
_lowerCAmelCase : Dict = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}", f"{long_class_name}LMPredictionHead", re.sub("Bert", _snake_case, _snake_case), )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel", "TestModelLMPredictionHead", _snake_case, overwrite_result=re.sub("Bert", "TestModel", _snake_case), )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
_lowerCAmelCase : List[Any] = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
_lowerCAmelCase : Any = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
_lowerCAmelCase : Dict = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning 发布。\n"
)
_lowerCAmelCase , _lowerCAmelCase : Any = check_copies.convert_to_localized_md(
_snake_case, _snake_case, localized_readme["format_model_list"])
self.assertFalse(_snake_case)
self.assertEqual(_snake_case, _snake_case)
_lowerCAmelCase , _lowerCAmelCase : Any = check_copies.convert_to_localized_md(
_snake_case, _snake_case, localized_readme["format_model_list"])
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(_snake_case)
_lowerCAmelCase : Optional[int] = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
_lowerCAmelCase : Optional[int] = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
_lowerCAmelCase : Optional[Any] = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
_lowerCAmelCase , _lowerCAmelCase : Any = check_copies.convert_to_localized_md(
_snake_case, _snake_case, localized_readme["format_model_list"])
# Check if the model link is synchronized.
self.assertEqual(_snake_case, _snake_case)
| 36 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A__ = {
"""configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""],
"""tokenization_mvp""": ["""MvpTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""MvpTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""MVP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MvpForCausalLM""",
"""MvpForConditionalGeneration""",
"""MvpForQuestionAnswering""",
"""MvpForSequenceClassification""",
"""MvpModel""",
"""MvpPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 82 | 0 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Any = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCamelCase , 'tf_padding' ) )
self.parent.assertTrue(hasattr(_UpperCamelCase , 'depth_multiplier' ) )
class lowerCamelCase :
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase=1_3 , _UpperCamelCase=3 , _UpperCamelCase=3_2 , _UpperCamelCase=0.25 , _UpperCamelCase=8 , _UpperCamelCase=8 , _UpperCamelCase=6 , _UpperCamelCase=3_2 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase="relu6" , _UpperCamelCase=1_2_8_0 , _UpperCamelCase=0.1 , _UpperCamelCase=0.02 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=1_0 , _UpperCamelCase=None , ) -> str:
UpperCAmelCase_ : int = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : List[Any] = num_channels
UpperCAmelCase_ : int = image_size
UpperCAmelCase_ : Optional[Any] = depth_multiplier
UpperCAmelCase_ : Optional[Any] = depth_divisible_by
UpperCAmelCase_ : Dict = min_depth
UpperCAmelCase_ : Tuple = expand_ratio
UpperCAmelCase_ : Optional[int] = tf_padding
UpperCAmelCase_ : Union[str, Any] = output_stride
UpperCAmelCase_ : Tuple = first_layer_is_expansion
UpperCAmelCase_ : Dict = finegrained_output
UpperCAmelCase_ : Optional[int] = hidden_act
UpperCAmelCase_ : Any = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
UpperCAmelCase_ : Optional[Any] = classifier_dropout_prob
UpperCAmelCase_ : Tuple = use_labels
UpperCAmelCase_ : int = is_training
UpperCAmelCase_ : Dict = num_labels
UpperCAmelCase_ : Optional[int] = initializer_range
UpperCAmelCase_ : Dict = scope
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : List[str] = None
if self.use_labels:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase_ : List[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def __UpperCAmelCase ( self ) -> Any:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> str:
UpperCAmelCase_ : List[Any] = MobileNetVaModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase_ : int = model(_UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Union[str, Any]:
UpperCAmelCase_ : Union[str, Any] = self.num_labels
UpperCAmelCase_ : Dict = MobileNetVaForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase_ : Union[str, Any] = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]:
UpperCAmelCase_ : str = self.num_labels
UpperCAmelCase_ : Union[str, Any] = MobileNetVaForSemanticSegmentation(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase_ : Tuple = model(_UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
UpperCAmelCase_ : Any = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase_ : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = config_and_inputs
UpperCAmelCase_ : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase (_snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
_snake_case : Union[str, Any] = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_snake_case : Optional[Any] = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_snake_case : int = False
_snake_case : Tuple = False
_snake_case : str = False
_snake_case : List[str] = False
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : str = MobileNetVaModelTester(self )
UpperCAmelCase_ : Any = MobileNetVaConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileNetV2 does not use inputs_embeds' )
def __UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skip(reason='MobileNetV2 does not support input and output embeddings' )
def __UpperCAmelCase ( self ) -> Dict:
pass
@unittest.skip(reason='MobileNetV2 does not output attentions' )
def __UpperCAmelCase ( self ) -> Any:
pass
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[int] = model_class(_UpperCamelCase )
UpperCAmelCase_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Optional[Any] = [*signature.parameters.keys()]
UpperCAmelCase_ : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[Any]:
def check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : int = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : int = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
UpperCAmelCase_ : List[Any] = outputs.hidden_states
UpperCAmelCase_ : List[str] = 1_6
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Dict = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Dict = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCamelCase )
@slow
def __UpperCAmelCase ( self ) -> List[str]:
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : str = MobileNetVaModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCAmelCase ( self ) -> Tuple:
return (
MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224' ) if is_vision_available() else None
)
@slow
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : str = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224' ).to(_UpperCamelCase )
UpperCAmelCase_ : List[Any] = self.default_image_processor
UpperCAmelCase_ : Optional[Any] = prepare_img()
UpperCAmelCase_ : Tuple = image_processor(images=_UpperCamelCase , return_tensors='pt' ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : List[Any] = model(**_UpperCamelCase )
# verify the logits
UpperCAmelCase_ : str = torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
UpperCAmelCase_ : str = torch.tensor([0.24_45, -1.19_93, 0.19_05] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase_ : int = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
UpperCAmelCase_ : Tuple = model.to(_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
UpperCAmelCase_ : List[Any] = prepare_img()
UpperCAmelCase_ : List[str] = image_processor(images=_UpperCamelCase , return_tensors='pt' ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : List[str] = model(**_UpperCamelCase )
UpperCAmelCase_ : Any = outputs.logits
# verify the logits
UpperCAmelCase_ : List[Any] = torch.Size((1, 2_1, 6_5, 6_5) )
self.assertEqual(logits.shape , _UpperCamelCase )
UpperCAmelCase_ : Dict = torch.tensor(
[
[[17.57_90, 17.75_81, 18.33_55], [18.32_57, 18.42_30, 18.89_73], [18.61_69, 18.86_50, 19.21_87]],
[[-2.15_95, -2.09_77, -2.37_41], [-2.42_26, -2.30_28, -2.68_35], [-2.78_19, -2.59_91, -2.77_06]],
[[4.20_58, 4.83_17, 4.76_38], [4.41_36, 5.03_61, 4.93_83], [4.50_28, 4.96_44, 4.87_34]],
] , device=_UpperCamelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCamelCase , atol=1E-4 ) )
| 145 |
from __future__ import annotations
def lowercase__ ( __snake_case : list[int] , __snake_case : int ):
'''simple docstring'''
if len(__snake_case ) == 0:
return False
UpperCAmelCase_ : Optional[int] = len(__snake_case ) // 2
if a_list[midpoint] == item:
return True
if item < a_list[midpoint]:
return binary_search(a_list[:midpoint] , __snake_case )
else:
return binary_search(a_list[midpoint + 1 :] , __snake_case )
if __name__ == "__main__":
__UpperCAmelCase = input('Enter numbers separated by comma:\n').strip()
__UpperCAmelCase = [int(item.strip()) for item in user_input.split(',')]
__UpperCAmelCase = int(input('Enter the number to be found in the list:\n').strip())
__UpperCAmelCase = '' if binary_search(sequence, target) else 'not '
print(F'{target} was {not_str}found in {sequence}')
| 145 | 1 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class lowerCAmelCase ( nn.Module ):
def __init__( self : Dict ) -> Tuple:
super().__init__()
lowerCamelCase__ : List[str] = nn.Linear(3 , 4 )
lowerCamelCase__ : Optional[Any] = nn.BatchNormad(4 )
lowerCamelCase__ : Dict = nn.Linear(4 , 5 )
def A_ ( self : int , UpperCAmelCase : Dict ) -> Optional[Any]:
return self.lineara(self.batchnorm(self.lineara(lowercase_ ) ) )
class lowerCAmelCase ( UpperCamelCase__ ):
def A_ ( self : Optional[Any] , UpperCAmelCase : Union[str, Any] , *UpperCAmelCase : Dict , **UpperCAmelCase : str ) -> int:
return (args[0] + 1,) + args[1:], kwargs
class lowerCAmelCase ( UpperCamelCase__ ):
def A_ ( self : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : List[str] ) -> int:
return output + 1
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)

        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
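

# Hedged sketch of the pattern the offload tests above exercise: keep a model's
# parameters on the meta device between calls and stream them in per forward.
# The device pick is an illustrative assumption.
def _demo_cpu_offload():
    model = ModelForTest()
    execution_device = 0 if torch.cuda.is_available() else "cpu"
    attach_align_device_hook(model, execution_device=execution_device, offload=True)
    output = model(torch.randn(2, 3))  # weights live on "meta" between calls
    remove_hook_from_submodules(model)  # restores the real weights
    return output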
| 50 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
IMAGENET_64_UNET_CONFIG = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
LSUN_256_UNET_CONFIG = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
CD_SCHEDULER_CONFIG = {
"num_train_timesteps": 40,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"num_train_timesteps": 151,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    # Map the consistency-model resnet block onto diffusers' ResnetBlock2D names.
    # The left-hand key names were lost in extraction and are reconstructed from
    # the standard norm1/conv1/time_emb_proj/norm2/conv2/conv_shortcut layout;
    # treat them as an informed assumption rather than a verbatim quote.
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim=None):
    # Split the fused qkv projection and map it onto diffusers' attention names
    # (group_norm/to_q/to_k/to_v/to_out.0). The left-hand keys are reconstructed
    # and should be treated as an informed assumption.
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
if __name__ == "__main__":
__lowerCAmelCase : List[Any] =argparse.ArgumentParser()
parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
)
parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
__lowerCAmelCase : Optional[Any] =parser.parse_args()
__lowerCAmelCase : List[Any] =strabool(args.class_cond)
__lowerCAmelCase : List[str] =os.path.basename(args.unet_path)
print(f"""Checkpoint: {ckpt_name}""")
# Get U-Net config
if "imagenet64" in ckpt_name:
__lowerCAmelCase : List[str] =IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__lowerCAmelCase : List[str] =LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
__lowerCAmelCase : Any =TEST_UNET_CONFIG
else:
raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""")
if not args.class_cond:
__lowerCAmelCase : Dict =None
__lowerCAmelCase : Optional[int] =con_pt_to_diffuser(args.unet_path, unet_config)
__lowerCAmelCase : Dict =UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
__lowerCAmelCase : List[str] =CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
__lowerCAmelCase : Dict =CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__lowerCAmelCase : Dict =CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""")
__lowerCAmelCase : Dict =CMStochasticIterativeScheduler(**scheduler_config)
__lowerCAmelCase : str =ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
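

# Hedged usage example -- the script name and checkpoint filename below are
# illustrative assumptions, not taken from this file:
#
#   python convert_consistency_to_diffusers.py \
#       --unet_path ./cd_imagenet64_l2.pt \
#       --dump_path ./consistency-model-imagenet64 \
#       --class_cond True
#
# The basename must contain "imagenet64", "256" plus "bedroom"/"cat", or "test"
# so a matching UNet config is picked, and "cd"/"ct" selects the scheduler config.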
| 237 | 0 |
from __future__ import annotations

import time
from collections.abc import Sequence
from random import randint

from matplotlib import pyplot as plt


def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Find the maximum-sum contiguous subarray of arr[low..high] by divide and conquer."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    """Best subarray that crosses the midpoint: best left tail plus best right tail."""
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
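
# Hedged worked example (hand-checked, not part of the original file):
#
#     >>> max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4], 0, 8)
#     (3, 6, 6)
#
# The best run is [4, -1, 2, 1] at inclusive indices 3..6, with sum 6.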
| 365 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
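

# Hedged usage sketch (not in the original module):
#
#     config = YolosConfig(num_detection_tokens=100)
#     onnx_config = YolosOnnxConfig(config)
#     list(onnx_config.inputs)            # ["pixel_values"]
#     onnx_config.atol_for_validation     # 1e-4
#     onnx_config.default_onnx_opset      # 12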
| 31 | 0 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # B x 3 x 224 x 224 -> B x 2048 x 7 x 7 -> B x 2048 x N -> B x N x 2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        # Multi-hot encode the target genres (this assignment's left-hand side was
        # lost in extraction and is reconstructed here).
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
    return [
        "Crime",
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
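

# Hedged usage sketch (paths and checkpoint are placeholder assumptions):
#
#     from torch.utils.data import DataLoader
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     dataset = JsonlDataset("data/train.jsonl", tokenizer, get_image_transforms(),
#                            get_mmimdb_labels(), max_seq_length=512)
#     loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)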
| 339 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
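

# Hedged usage note (not in the original module): the property multiplies the
# conv strides, i.e. how many raw audio samples collapse into one encoder frame.
#
#     config = UniSpeechConfig()
#     config.inputs_to_logits_ratio   # 5 * 2 * 2 * 2 * 2 * 2 * 2 == 320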
| 320 | 0 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
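
# Hedged usage sketch (assuming the file lives at utils/release.py, which this
# snippet does not state):
#
#     python utils/release.py                 # bump minor version for a release
#     python utils/release.py --patch         # cut a patch release
#     python utils/release.py --post_release  # move back to a .dev0 version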
| 74 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( A_ ):
lowerCAmelCase__ : int = 0
for ch in input_str:
lowerCAmelCase__ : Any = ord(A_ )
lowerCAmelCase__ : Any = pow(2 , A_ )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
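
# Hedged examples (hand-checked, not from the original file):
#
#     >>> is_unique_chars("abcde")
#     True
#     >>> is_unique_chars("hello")
#     False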
| 74 | 1 |
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = (
                num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
            )

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
        )
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device
        )

        with torch.no_grad():
            outputs = model(**inputs)

        expected_loss = torch.tensor(torch.tensor([0.6469]), device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
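

# Hedged inference sketch mirroring the integration test above (the model id
# comes from this file; the rest is standard transformers API):
#
#     from transformers import VideoMAEImageProcessor, VideoMAEForVideoClassification
#
#     processor = VideoMAEImageProcessor.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")
#     model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")
#     inputs = processor(prepare_video(), return_tensors="pt")
#     logits = model(**inputs).logits          # shape (1, 400)
#     label = logits.argmax(-1).item()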
| 100 |
"""simple docstring"""
from math import isqrt, loga
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , UpperCamelCase_ , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = False
return [i for i in range(2 , UpperCamelCase_ ) if is_prime[i]]
def _lowerCAmelCase ( UpperCamelCase_ = 80_0800 , UpperCamelCase_ = 80_0800 ):
__SCREAMING_SNAKE_CASE = degree * loga(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = int(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = calculate_prime_numbers(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = len(UpperCamelCase_ ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 100 | 1 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
# Reconstructed assignment: the original left-hand side was garbled; setting
# this environment variable is an informed assumption based on similar scripts.
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Returns everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def _lowerCamelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Tuple ):
"""simple docstring"""
UpperCAmelCase_ : Dict = Accelerator(dispatch_batches=_lowerCamelCase , split_batches=_lowerCamelCase )
UpperCAmelCase_ : List[str] = get_dataloader(_lowerCamelCase , not dispatch_batches )
UpperCAmelCase_ : int = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=_lowerCamelCase )
UpperCAmelCase_ : Any = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def _lowerCamelCase ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Union[str, Any] ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = []
for batch in dataloader:
UpperCAmelCase_ : Dict = batch.values()
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(_lowerCamelCase )
UpperCAmelCase_ : int = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
UpperCAmelCase_ : str = [], []
for logit, targ in logits_and_targets:
logits.append(_lowerCamelCase )
targs.append(_lowerCamelCase )
UpperCAmelCase_ : Optional[Any] = torch.cat(_lowerCamelCase ), torch.cat(_lowerCamelCase )
return logits, targs
def _lowerCamelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : Union[str, Any]=82 , lowerCamelCase_ : int=False , lowerCamelCase_ : List[Any]=False , lowerCamelCase_ : Tuple=16 ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = get_basic_setup(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
UpperCAmelCase_ : Optional[Any] = generate_predictions(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
assert (
len(_lowerCamelCase ) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_lowerCamelCase )}'''
def _lowerCamelCase ( lowerCamelCase_ : int = False , lowerCamelCase_ : Tuple = False ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = evaluate.load('glue' , 'mrpc' )
UpperCAmelCase_ : Dict = get_mrpc_setup(_lowerCamelCase , _lowerCamelCase )
# First do baseline
UpperCAmelCase_ : Optional[Any] = setup["""no"""]
model.to(_lowerCamelCase )
model.eval()
for batch in dataloader:
batch.to(_lowerCamelCase )
with torch.inference_mode():
UpperCAmelCase_ : List[Any] = model(**_lowerCamelCase )
UpperCAmelCase_ : Tuple = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=_lowerCamelCase , references=batch['labels'] )
UpperCAmelCase_ : Optional[int] = metric.compute()
# Then do distributed
UpperCAmelCase_ : Optional[int] = setup["""ddp"""]
model.eval()
for batch in dataloader:
with torch.inference_mode():
UpperCAmelCase_ : Union[str, Any] = model(**_lowerCamelCase )
UpperCAmelCase_ : Optional[Any] = outputs.logits.argmax(dim=-1 )
UpperCAmelCase_ : List[str] = batch["""labels"""]
UpperCAmelCase_ : Optional[Any] = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=_lowerCamelCase , references=_lowerCamelCase )
UpperCAmelCase_ : Union[str, Any] = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def _lowerCamelCase ( ):
"""simple docstring"""
UpperCAmelCase_ : str = Accelerator(split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(_lowerCamelCase , _lowerCamelCase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
UpperCAmelCase_ : Tuple = Accelerator(split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase )
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(_lowerCamelCase , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
UpperCAmelCase_ : Optional[Any] = Accelerator()
test_torch_metrics(_lowerCamelCase , 512 )
accelerator.state._reset_state()
def _lowerCamelCase ( lowerCamelCase_ : List[str] ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 363 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
@staticmethod
def _UpperCamelCase ( *snake_case_ , **snake_case_ ):
'''simple docstring'''
pass
def _lowerCamelCase ( lowerCamelCase_ : Image ):
"""simple docstring"""
UpperCAmelCase_ : Dict = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ :Optional[int] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : str = DepthEstimationPipeline(model=snake_case_ , image_processor=snake_case_ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _UpperCamelCase ( self , snake_case_ , snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : Dict = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , snake_case_ )
import datasets
UpperCAmelCase_ : Optional[int] = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
UpperCAmelCase_ : Union[str, Any] = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , snake_case_ , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def _UpperCamelCase ( self ):
'''simple docstring'''
pass
@slow
@require_torch
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : Any = 'Intel/dpt-large'
UpperCAmelCase_ : Dict = pipeline('depth-estimation' , model=snake_case_ )
UpperCAmelCase_ : Optional[int] = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
UpperCAmelCase_ : Union[str, Any] = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.3_04 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.6_62 )
@require_torch
def _UpperCamelCase ( self ):
'''simple docstring'''
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
| 274 | 0 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
__a = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
_UpperCAmelCase : str = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
_UpperCAmelCase : List[str] = self.diffusers_dir
shutil.copy(
os.path.join(lowerCAmelCase__ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def _lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : List[Any] = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def _lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int]=None ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : Tuple = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_UpperCAmelCase : List[Any] = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_UpperCAmelCase : List[str] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 )
_UpperCAmelCase : Optional[int] = black.format_str(lowerCAmelCase__ , mode=lowerCAmelCase__ )
_UpperCAmelCase : Tuple = os.path.join(self.diffusers_dir , "new_code.py" )
with open(lowerCAmelCase__ , "w" , newline="\n" ) as f:
f.write(lowerCAmelCase__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowerCAmelCase__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowerCAmelCase__ )
with open(lowerCAmelCase__ , "r" ) as f:
self.assertTrue(f.read() , lowerCAmelCase__ )
def _lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : Any = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def _lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , lowerCAmelCase__ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , lowerCAmelCase__ ) , )
# Copy consistency with a really long name
_UpperCAmelCase : int = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub("Bert" , lowerCAmelCase__ , lowerCAmelCase__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , lowerCAmelCase__ , overwrite_result=re.sub("DDPM" , "Test" , lowerCAmelCase__ ) , )
| 145 |
'''simple docstring'''
import math
def __UpperCAmelCase ( a_: int ):
return math.sqrt(a_ ) * math.sqrt(a_ ) == num
def __UpperCAmelCase ( a_: int ):
_UpperCAmelCase : Dict = 0
_UpperCAmelCase : List[str] = n
while left <= right:
_UpperCAmelCase : Dict = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
_UpperCAmelCase : int = mid - 1
else:
_UpperCAmelCase : Tuple = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 145 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : List[str] = {
"configuration_nllb_moe": [
"NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
"NllbMoeConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
"NllbMoeForConditionalGeneration",
"NllbMoeModel",
"NllbMoePreTrainedModel",
"NllbMoeTop2Router",
"NllbMoeSparseMLP",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
__A : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 369 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : List[str] = {
"configuration_bigbird_pegasus": [
"BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BigBirdPegasusConfig",
"BigBirdPegasusOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = [
"BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
"BigBirdPegasusForCausalLM",
"BigBirdPegasusForConditionalGeneration",
"BigBirdPegasusForQuestionAnswering",
"BigBirdPegasusForSequenceClassification",
"BigBirdPegasusModel",
"BigBirdPegasusPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Optional[Any] = tempfile.mkdtemp()
snake_case : Tuple = BlipImageProcessor()
snake_case : Optional[int] = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
snake_case : Dict = BlipaProcessor(snake_case__ , snake_case__ )
processor.save_pretrained(self.tmpdirname )
def _SCREAMING_SNAKE_CASE (self : str , **snake_case__ : int ) -> Union[str, Any]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).tokenizer
def _SCREAMING_SNAKE_CASE (self : str , **snake_case__ : Any ) -> Union[str, Any]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).image_processor
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
'''simple docstring'''
snake_case : Dict = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
snake_case : Tuple = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> str:
'''simple docstring'''
snake_case : List[Any] = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
snake_case : Union[str, Any] = self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 )
snake_case : Dict = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Any:
'''simple docstring'''
snake_case : List[str] = self.get_image_processor()
snake_case : str = self.get_tokenizer()
snake_case : Optional[Any] = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
snake_case : Optional[Any] = self.prepare_image_inputs()
snake_case : Optional[int] = image_processor(snake_case__ , return_tensors="np" )
snake_case : List[Any] = processor(images=snake_case__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Tuple:
'''simple docstring'''
snake_case : int = self.get_image_processor()
snake_case : Tuple = self.get_tokenizer()
snake_case : List[str] = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
snake_case : Union[str, Any] = "lower newer"
snake_case : int = processor(text=snake_case__ )
snake_case : List[str] = tokenizer(snake_case__ , return_token_type_ids=snake_case__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : int = self.get_image_processor()
snake_case : List[Any] = self.get_tokenizer()
snake_case : List[str] = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
snake_case : int = "lower newer"
snake_case : str = self.prepare_image_inputs()
snake_case : Union[str, Any] = processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(snake_case__ ):
processor()
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Any = self.get_image_processor()
snake_case : List[str] = self.get_tokenizer()
snake_case : Tuple = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
snake_case : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case : Dict = processor.batch_decode(snake_case__ )
snake_case : str = tokenizer.batch_decode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Dict ) -> str:
'''simple docstring'''
snake_case : int = self.get_image_processor()
snake_case : int = self.get_tokenizer()
snake_case : Optional[int] = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
snake_case : List[str] = "lower newer"
snake_case : int = self.prepare_image_inputs()
snake_case : Dict = processor(text=snake_case__ , images=snake_case__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 59 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
def __init__( self : List[Any] ):
_UpperCAmelCase : Union[str, Any] = []
def _A ( self : Any , A : Union[str, Any] , A : Optional[int] , A : List[str] , **A : Tuple ):
self.events.append("on_init_end" )
def _A ( self : Any , A : str , A : List[Any] , A : List[Any] , **A : Tuple ):
self.events.append("on_train_begin" )
def _A ( self : Tuple , A : List[str] , A : Tuple , A : int , **A : List[str] ):
self.events.append("on_train_end" )
def _A ( self : Optional[Any] , A : Dict , A : Any , A : Optional[Any] , **A : List[Any] ):
self.events.append("on_epoch_begin" )
def _A ( self : Optional[Any] , A : List[Any] , A : List[str] , A : Optional[int] , **A : Optional[int] ):
self.events.append("on_epoch_end" )
def _A ( self : List[str] , A : Optional[int] , A : List[Any] , A : Union[str, Any] , **A : Any ):
self.events.append("on_step_begin" )
def _A ( self : Tuple , A : Union[str, Any] , A : int , A : Optional[int] , **A : int ):
self.events.append("on_step_end" )
def _A ( self : Optional[int] , A : Optional[Any] , A : Union[str, Any] , A : str , **A : Union[str, Any] ):
self.events.append("on_evaluate" )
def _A ( self : Optional[Any] , A : Optional[int] , A : Dict , A : List[Any] , **A : Dict ):
self.events.append("on_predict" )
def _A ( self : Dict , A : Dict , A : List[Any] , A : Dict , **A : str ):
self.events.append("on_save" )
def _A ( self : Tuple , A : Optional[Any] , A : Union[str, Any] , A : Optional[int] , **A : Dict ):
self.events.append("on_log" )
def _A ( self : Optional[int] , A : Optional[Any] , A : Tuple , A : Tuple , **A : List[str] ):
self.events.append("on_prediction_step" )
@require_torch
class lowerCamelCase_ (unittest.TestCase ):
'''simple docstring'''
def _A ( self : Optional[int] ):
_UpperCAmelCase : Optional[Any] = tempfile.mkdtemp()
def _A ( self : List[Any] ):
shutil.rmtree(self.output_dir )
def _A ( self : Union[str, Any] , A : Optional[int]=0 , A : Optional[Any]=0 , A : Optional[Any]=64 , A : Dict=64 , A : Any=None , A : Tuple=False , **A : Optional[int] ):
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# its set to False since the tests later on depend on its value.
_UpperCAmelCase : str = RegressionDataset(length=A )
_UpperCAmelCase : Union[str, Any] = RegressionDataset(length=A )
_UpperCAmelCase : Any = RegressionModelConfig(a=A , b=A )
_UpperCAmelCase : List[Any] = RegressionPreTrainedModel(A )
_UpperCAmelCase : Dict = TrainingArguments(self.output_dir , disable_tqdm=A , report_to=[] , **A )
return Trainer(
A , A , train_dataset=A , eval_dataset=A , callbacks=A , )
def _A ( self : str , A : List[str] , A : List[str] ):
self.assertEqual(len(A ) , len(A ) )
# Order doesn't matter
_UpperCAmelCase : Tuple = sorted(A , key=lambda A : cb.__name__ if isinstance(A , A ) else cb.__class__.__name__ )
_UpperCAmelCase : Any = sorted(A , key=lambda A : cb.__name__ if isinstance(A , A ) else cb.__class__.__name__ )
for cba, cba in zip(A , A ):
if isinstance(A , A ) and isinstance(A , A ):
self.assertEqual(A , A )
elif isinstance(A , A ) and not isinstance(A , A ):
self.assertEqual(A , cba.__class__ )
elif not isinstance(A , A ) and isinstance(A , A ):
self.assertEqual(cba.__class__ , A )
else:
self.assertEqual(A , A )
def _A ( self : int , A : List[str] ):
_UpperCAmelCase : List[str] = ["on_init_end", "on_train_begin"]
_UpperCAmelCase : str = 0
_UpperCAmelCase : Optional[Any] = len(trainer.get_eval_dataloader() )
_UpperCAmelCase : Optional[int] = ["on_prediction_step"] * len(trainer.get_eval_dataloader() ) + ["on_log", "on_evaluate"]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("on_epoch_begin" )
for _ in range(A ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("on_log" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("on_save" )
expected_events.append("on_epoch_end" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def _A ( self : str ):
_UpperCAmelCase : Any = self.get_trainer()
_UpperCAmelCase : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
# Callbacks passed at init are added to the default callbacks
_UpperCAmelCase : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
_UpperCAmelCase : List[Any] = self.get_trainer(disable_tqdm=A )
_UpperCAmelCase : Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
def _A ( self : Optional[Any] ):
_UpperCAmelCase : Dict = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
_UpperCAmelCase : Dict = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(A )
expected_callbacks.remove(A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
_UpperCAmelCase : Optional[Any] = self.get_trainer()
_UpperCAmelCase : Any = trainer.pop_callback(A )
self.assertEqual(cb.__class__ , A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
trainer.add_callback(A )
expected_callbacks.insert(0 , A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
# We can also add, pop, or remove by instance
_UpperCAmelCase : Union[str, Any] = self.get_trainer()
_UpperCAmelCase : List[Any] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(A )
expected_callbacks.remove(A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
_UpperCAmelCase : List[Any] = self.get_trainer()
_UpperCAmelCase : List[Any] = trainer.callback_handler.callbacks[0]
_UpperCAmelCase : Union[str, Any] = trainer.pop_callback(A )
self.assertEqual(A , A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
trainer.add_callback(A )
expected_callbacks.insert(0 , A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
def _A ( self : Optional[Any] ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="ignore" , category=A )
_UpperCAmelCase : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
_UpperCAmelCase : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A , self.get_expected_events(A ) )
# Independent log/save/eval
_UpperCAmelCase : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
_UpperCAmelCase : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A , self.get_expected_events(A ) )
_UpperCAmelCase : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
_UpperCAmelCase : Tuple = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A , self.get_expected_events(A ) )
_UpperCAmelCase : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="steps" )
trainer.train()
_UpperCAmelCase : Optional[int] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A , self.get_expected_events(A ) )
_UpperCAmelCase : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="epoch" )
trainer.train()
_UpperCAmelCase : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A , self.get_expected_events(A ) )
# A bit of everything
_UpperCAmelCase : int = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="steps" , )
trainer.train()
_UpperCAmelCase : Optional[int] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A , self.get_expected_events(A ) )
# warning should be emitted for duplicated callbacks
with patch("transformers.trainer_callback.logger.warning" ) as warn_mock:
_UpperCAmelCase : Optional[Any] = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(A ) in warn_mock.call_args[0][0]
| 31 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class a (_lowerCAmelCase ):
"""simple docstring"""
def __init__( self : Tuple , lowerCamelCase : Dict , lowerCamelCase : int ) -> List[Any]:
__snake_case : List[Any] = params
__snake_case : int = np.array(lowerCamelCase )
__snake_case : Any = np.array([len(lowerCamelCase ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self : List[Any] , lowerCamelCase : Dict ) -> str:
return (self.token_ids[index], self.lengths[index])
def __len__( self : Dict ) -> Union[str, Any]:
return len(self.lengths )
def __snake_case ( self : Optional[int] ) -> str:
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def __snake_case ( self : Union[str, Any] ) -> List[str]:
__snake_case : Optional[int] = self.params.max_model_input_size
__snake_case : Union[str, Any] = self.lengths > max_len
logger.info(F'Splitting {sum(lowerCamelCase )} too long sequences.' )
def divide_chunks(lowerCamelCase : str , lowerCamelCase : Any ):
return [l[i : i + n] for i in range(0 , len(lowerCamelCase ) , lowerCamelCase )]
__snake_case : str = []
__snake_case : int = []
if self.params.mlm:
__snake_case , __snake_case : Optional[int] = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
else:
__snake_case , __snake_case : List[Any] = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
__snake_case : str = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
__snake_case : List[Any] = np.insert(lowerCamelCase , 0 , lowerCamelCase )
if sub_s[-1] != sep_id:
__snake_case : Optional[Any] = np.insert(lowerCamelCase , len(lowerCamelCase ) , lowerCamelCase )
assert len(lowerCamelCase ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(lowerCamelCase )
new_tok_ids.extend(lowerCamelCase )
new_lengths.extend([len(lowerCamelCase ) for l in sub_seqs] )
__snake_case : str = np.array(lowerCamelCase )
__snake_case : Optional[Any] = np.array(lowerCamelCase )
def __snake_case ( self : Any ) -> List[Any]:
__snake_case : List[Any] = len(self )
__snake_case : int = self.lengths > 11
__snake_case : str = self.token_ids[indices]
__snake_case : Optional[int] = self.lengths[indices]
__snake_case : Tuple = len(self )
logger.info(F'Remove {init_size - new_size} too short (<=11 tokens) sequences.' )
def __snake_case ( self : int ) -> Optional[Any]:
if "unk_token" not in self.params.special_tok_ids:
return
else:
__snake_case : str = self.params.special_tok_ids["unk_token"]
__snake_case : Optional[Any] = len(self )
__snake_case : str = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
__snake_case : int = (unk_occs / self.lengths) < 0.5
__snake_case : Tuple = self.token_ids[indices]
__snake_case : List[str] = self.lengths[indices]
__snake_case : List[Any] = len(self )
logger.info(F'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).' )
def __snake_case ( self : str ) -> List[str]:
if not self.params.is_master:
return
logger.info(F'{len(self )} sequences' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def __snake_case ( self : Any , lowerCamelCase : Any ) -> Union[str, Any]:
__snake_case : str = [t[0] for t in batch]
__snake_case : Union[str, Any] = [t[1] for t in batch]
assert len(lowerCamelCase ) == len(lowerCamelCase )
# Max for paddings
__snake_case : List[str] = max(lowerCamelCase )
# Pad token ids
if self.params.mlm:
__snake_case : Any = self.params.special_tok_ids["pad_token"]
else:
__snake_case : List[str] = self.params.special_tok_ids["unk_token"]
__snake_case : Any = [list(t.astype(lowerCamelCase ) ) + [pad_idx] * (max_seq_len_ - len(lowerCamelCase )) for t in token_ids]
assert len(tk_ ) == len(lowerCamelCase )
assert all(len(lowerCamelCase ) == max_seq_len_ for t in tk_ )
__snake_case : Tuple = torch.tensor(tk_ ) # (bs, max_seq_len_)
__snake_case : str = torch.tensor(lowerCamelCase ) # (bs)
return tk_t, lg_t
| 134 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
_snake_case : Optional[Any] = "Create a default config file for Accelerate with only a few flags set."
def lowerCAmelCase_ ( __lowerCamelCase="no" , __lowerCamelCase = default_json_config_file , __lowerCamelCase = False ):
__snake_case : int = Path(__lowerCamelCase )
path.parent.mkdir(parents=__lowerCamelCase , exist_ok=__lowerCamelCase )
if path.exists():
print(
F'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' )
return False
__snake_case : Any = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' )
__snake_case : Optional[int] = {
"compute_environment": "LOCAL_MACHINE",
"mixed_precision": mixed_precision,
}
if torch.cuda.is_available():
__snake_case : Dict = torch.cuda.device_count()
__snake_case : Tuple = num_gpus
__snake_case : List[str] = False
if num_gpus > 1:
__snake_case : Optional[int] = "MULTI_GPU"
else:
__snake_case : Dict = "NO"
elif is_xpu_available() and use_xpu:
__snake_case : List[str] = torch.xpu.device_count()
__snake_case : str = num_xpus
__snake_case : int = False
if num_xpus > 1:
__snake_case : Optional[int] = "MULTI_XPU"
else:
__snake_case : str = "NO"
elif is_npu_available():
__snake_case : Any = torch.npu.device_count()
__snake_case : str = num_npus
__snake_case : str = False
if num_npus > 1:
__snake_case : Optional[int] = "MULTI_NPU"
else:
__snake_case : int = "NO"
else:
__snake_case : List[Any] = 0
__snake_case : Dict = True
__snake_case : Tuple = 1
__snake_case : Tuple = "NO"
__snake_case : str = ClusterConfig(**__lowerCamelCase )
config.to_json_file(__lowerCamelCase )
return path
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
__snake_case : Optional[Any] = parser.add_parser("default" , parents=__lowerCamelCase , help=__lowerCamelCase , formatter_class=__lowerCamelCase )
parser.add_argument(
"--config_file" , default=__lowerCamelCase , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , dest="save_location" , )
parser.add_argument(
"--mixed_precision" , choices=["no", "fp16", "bf16"] , type=__lowerCamelCase , help="Whether or not to use mixed precision training. "
"Choose between FP16 and BF16 (bfloat16) training. "
"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later." , default="no" , )
parser.set_defaults(func=__lowerCamelCase )
return parser
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : List[Any] = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F'accelerate configuration saved at {config_file}' )
| 134 | 1 |
"""simple docstring"""
_lowercase = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_lowercase = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_lowercase = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 74 |
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] ,A_ : str ,A_ : Dict=13 ,A_ : str=7 ,A_ : str=True ,A_ : Any=True ,A_ : Optional[Any]=True ,A_ : Any=True ,A_ : Optional[Any]=True ,A_ : Any=False ,A_ : str=False ,A_ : Tuple=False ,A_ : str=2 ,A_ : Optional[int]=99 ,A_ : Union[str, Any]=0 ,A_ : Optional[Any]=32 ,A_ : Optional[int]=5 ,A_ : Optional[int]=4 ,A_ : Union[str, Any]=0.1 ,A_ : List[str]=0.1 ,A_ : Union[str, Any]=512 ,A_ : Union[str, Any]=2 ,A_ : Any=0.02 ,A_ : List[str]=2 ,A_ : int=4 ,A_ : int="last" ,A_ : Dict=True ,A_ : Union[str, Any]=None ,A_ : Any=0 ,) -> List[Any]:
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_input_lengths
A = use_token_type_ids
A = use_labels
A = gelu_activation
A = sinusoidal_embeddings
A = causal
A = asm
A = n_langs
A = vocab_size
A = n_special
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_sequence_label_size
A = initializer_range
A = num_labels
A = num_choices
A = summary_type
A = use_proj
A = scope
A = bos_token_id
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
if self.use_input_lengths:
A = (
ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A = None
if self.use_token_type_ids:
A = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A = ids_tensor([self.batch_size] ,2 ).float()
A = ids_tensor([self.batch_size] ,self.num_choices )
A = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
return XLMConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,)
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Any ,A_ : int ,A_ : Dict ,A_ : str ,A_ : Optional[Any] ,A_ : List[str] ,A_ : Union[str, Any] ,A_ : int ,A_ : str ,) -> Any:
A = XLMModel(config=A_ )
model.to(A_ )
model.eval()
A = model(A_ ,lengths=A_ ,langs=A_ )
A = model(A_ ,langs=A_ )
A = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Any ,A_ : str ,A_ : Optional[int] ,A_ : Union[str, Any] ,A_ : Optional[int] ,A_ : str ,A_ : Any ,A_ : str ,A_ : Dict ,) -> Dict:
A = XLMWithLMHeadModel(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,token_type_ids=A_ ,labels=A_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[str] ,A_ : Union[str, Any] ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : Any ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Optional[Any] ,) -> int:
A = XLMForQuestionAnsweringSimple(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(A_ ,start_positions=A_ ,end_positions=A_ )
A = outputs
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple ,A_ : Optional[int] ,A_ : Any ,A_ : List[Any] ,A_ : int ,A_ : Tuple ,A_ : Tuple ,A_ : List[str] ,A_ : Optional[int] ,) -> List[Any]:
A = XLMForQuestionAnswering(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(
A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,p_mask=A_ ,)
A = model(
A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,)
((A) , ) = result_with_labels.to_tuple()
A = model(A_ ,start_positions=A_ ,end_positions=A_ )
((A) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape ,() )
self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Tuple ,A_ : int ,A_ : Optional[int] ,A_ : List[str] ,A_ : str ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[Any] ,) -> Optional[int]:
A = XLMForSequenceClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(A_ ,labels=A_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[Any] ,A_ : str ,A_ : Optional[Any] ,A_ : List[Any] ,A_ : Optional[int] ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : Optional[int] ,A_ : Optional[int] ,) -> List[str]:
A = self.num_labels
A = XLMForTokenClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,attention_mask=A_ ,labels=A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Optional[int] ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : Optional[int] ,A_ : List[str] ,A_ : Optional[Any] ,A_ : Union[str, Any] ,A_ : Dict ,A_ : List[Any] ,) -> List[str]:
A = self.num_choices
A = XLMForMultipleChoice(config=A_ )
model.to(A_ )
model.eval()
A = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = model(
A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
A = self.prepare_config_and_inputs()
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = config_and_inputs
A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_lowerCamelCase: str = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_lowerCamelCase: Optional[int] = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[int] ,A_ : Union[str, Any] ,A_ : Union[str, Any] ,A_ : Any ,A_ : Any ) -> Any:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : str ,A_ : Optional[int] ,A_ : List[Any]=False ) -> int:
A = super()._prepare_for_class(A_ ,A_ ,return_labels=A_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A_ )
A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A_ )
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
A = XLMModelTester(self )
A = ConfigTester(self ,config_class=A_ ,emb_dim=37 )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[str, Any] ,A_ : Any ,A_ : str ,A_ : Tuple ,A_ : Any ,A_ : Any=False ,A_ : Any=1 ) -> List[Any]:
self.assertIsInstance(A_ ,A_ )
self.assertListEqual(
[isinstance(A_ ,A_ ) for iter_attentions in attentions] ,[True] * len(A_ ) )
self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(A_ ):
# adds PAD dummy token
A = min_length + idx + 1
A = min_length + idx + 1
A = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(A_ ) )
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Optional[int] ,A_ : str ,A_ : Optional[int] ,A_ : int ,A_ : Any ,A_ : str=False ,A_ : Any=1 ) -> Tuple:
self.assertIsInstance(A_ ,A_ )
self.assertListEqual(
[isinstance(A_ ,A_ ) for iter_hidden_states in hidden_states] ,[True] * len(A_ ) ,)
self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(A_ ):
# adds PAD dummy token
A = min_length + idx + 1
A = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(A_ ) ,)
pass
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = XLMModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
A = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(A_ )
A = torch.tensor([[14, 447]] ,dtype=torch.long ,device=A_ ) # the president
A = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
A = model.generate(A_ ,do_sample=A_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() ,A_ )
| 74 | 1 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
__A : Optional[Any] = pd.read_csv('''sample_data.csv''', header=None)
__A : List[Any] = df.shape[:1][0]
# If you're using some other dataset input the target column
__A : Dict = df.iloc[:, 1:2]
__A : List[str] = actual_data.values.reshape(len_data, 1)
__A : List[str] = MinMaxScaler().fit_transform(actual_data)
__A : List[str] = 10
__A : Optional[int] = 5
__A : Optional[int] = 20
__A : Optional[Any] = len_data - periods * look_back
__A : int = actual_data[:division]
__A : int = actual_data[division - look_back :]
__A , __A : Dict = [], []
__A , __A : Tuple = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
__A : List[Any] = np.array(train_x)
__A : str = np.array(test_x)
__A : Dict = np.array([list(i.ravel()) for i in train_y])
__A : int = np.array([list(i.ravel()) for i in test_y])
__A : Optional[Any] = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(128, 1)))
model.add(Dense(forward_days))
model.compile(loss='''mean_squared_error''', optimizer='''adam''')
__A : Union[str, Any] = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
__A : Union[str, Any] = model.predict(x_test)
| 27 |
"""simple docstring"""
def A_ ( snake_case_ : list[int] ):
'''simple docstring'''
if not numbers:
return 0
if not isinstance(snake_case_ ,(list, tuple) ) or not all(
isinstance(snake_case_ ,snake_case_ ) for number in numbers ):
raise ValueError("""numbers must be an iterable of integers""" )
UpperCamelCase : int = numbers[0]
for i in range(1 ,len(snake_case_ ) ):
# update the maximum and minimum subarray products
UpperCamelCase : List[str] = numbers[i]
if number < 0:
UpperCamelCase , UpperCamelCase : Optional[int] = min_till_now, max_till_now
UpperCamelCase : Dict = max(snake_case_ ,max_till_now * number )
UpperCamelCase : Union[str, Any] = min(snake_case_ ,min_till_now * number )
# update the maximum product found till now
UpperCamelCase : Union[str, Any] = max(snake_case_ ,snake_case_ )
return max_prod
| 27 | 1 |
from __future__ import annotations
def __lowerCAmelCase ( a__ , a__ ) -> list[list[int]]:
__a = []
create_all_state(1 , a__ , a__ , [] , a__ )
return result
def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ , ) -> None:
if level == 0:
total_list.append(current_list[:] )
return
for i in range(a__ , total_number - level + 2 ):
current_list.append(a__ )
create_all_state(i + 1 , a__ , level - 1 , a__ , a__ )
current_list.pop()
def __lowerCAmelCase ( a__ ) -> None:
for i in total_list:
print(*a__ )
if __name__ == "__main__":
A : Union[str, Any] = 4
A : Optional[int] = 2
A : Dict = generate_all_combinations(n, k)
print_all_state(total_list)
| 6 |
from sklearn.metrics import fa_score
import datasets
A : Any = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
A : List[Any] = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
A : List[Any] = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A (datasets.Metric ):
'''simple docstring'''
def a_ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , )
def a_ ( self : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Any="binary" , __lowerCAmelCase : Optional[int]=None ) -> List[Any]:
"""simple docstring"""
A__ = fa_score(
__lowerCAmelCase , __lowerCAmelCase , labels=__lowerCAmelCase , pos_label=__lowerCAmelCase , average=__lowerCAmelCase , sample_weight=__lowerCAmelCase )
return {"f1": float(__lowerCAmelCase ) if score.size == 1 else score}
| 274 | 0 |
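# A sketch of the binary F1 that the metric above wraps, computed directly
# from counts so the harmonic-mean formula is visible. The toy labels are
# the ones from Example 1 of the docstring; the function name is mine.
def binary_f1(references, predictions, pos_label=1):
    tp = sum(r == p == pos_label for r, p in zip(references, predictions))
    fp = sum(p == pos_label != r for r, p in zip(references, predictions))
    fn = sum(r == pos_label != p for r, p in zip(references, predictions))
    if tp == 0:
        return 0.0
    precision, recall = tp / (tp + fp), tp / (tp + fn)
    return 2 * precision * recall / (precision + recall)

assert binary_f1([0, 1, 0, 1, 0], [0, 0, 1, 1, 0]) == 0.5  # matches Example 1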
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = '''facebook/bart-large-mnli'''
__SCREAMING_SNAKE_CASE = (
'''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
'''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
'''It returns the most likely label in the list of provided `labels` for the input text.'''
)
__SCREAMING_SNAKE_CASE = '''text_classifier'''
__SCREAMING_SNAKE_CASE = AutoTokenizer
__SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification
__SCREAMING_SNAKE_CASE = ['''text''', ['''text''']]
__SCREAMING_SNAKE_CASE = ['''text''']
def __lowerCamelCase ( self ) -> str:
super().setup()
__UpperCamelCase = self.model.config
__UpperCamelCase = -1
for idx, label in config.idalabel.items():
if label.lower().startswith("""entail""" ):
__UpperCamelCase = int(__A )
if self.entailment_id == -1:
raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""" )
def __lowerCamelCase ( self , lowercase , lowercase ) -> int:
__UpperCamelCase = labels
return self.pre_processor(
[text] * len(__A ) , [f"This example is {label}" for label in labels] , return_tensors="""pt""" , padding="""max_length""" , )
def __lowerCamelCase ( self , lowercase ) -> Union[str, Any]:
__UpperCamelCase = outputs.logits
__UpperCamelCase = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
| 359 |
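# The tool above is essentially NLI-based zero-shot classification;
# transformers exposes the same idea as a ready-made pipeline. A usage
# sketch only -- it downloads the large facebook/bart-large-mnli checkpoint,
# and the example text and labels are assumptions of mine.
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier(
    "The keyboard stopped working after the update.",
    candidate_labels=["hardware", "software", "billing"],
)
print(result["labels"][0])  # most likely label for the input text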
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = filter(lambda __A : p.requires_grad ,model.parameters() )
__UpperCamelCase = sum([np.prod(p.size() ) for p in model_parameters] )
return params
a__ : Optional[Any] = logging.getLogger(__name__)
def _lowercase ( __A ,__A ):
'''simple docstring'''
if metric == "rouge2":
__UpperCamelCase = """{val_avg_rouge2:.4f}-{step_count}"""
elif metric == "bleu":
__UpperCamelCase = """{val_avg_bleu:.4f}-{step_count}"""
elif metric == "em":
__UpperCamelCase = """{val_avg_em:.4f}-{step_count}"""
else:
raise NotImplementedError(
f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
""" function.""" )
__UpperCamelCase = ModelCheckpoint(
dirpath=__A ,filename=__A ,monitor=f"val_{metric}" ,mode="""max""" ,save_top_k=3 ,every_n_epochs=1 ,)
return checkpoint_callback
def _lowercase ( __A ,__A ):
'''simple docstring'''
return EarlyStopping(
monitor=f"val_{metric}" ,mode="""min""" if """loss""" in metric else """max""" ,patience=__A ,verbose=__A ,)
class UpperCAmelCase__ ( pl.Callback):
def __lowerCamelCase ( self , lowercase , lowercase ) -> Dict:
__UpperCamelCase = {f"lr_group_{i}": param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(lowercase )
@rank_zero_only
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase=True ) -> None:
logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****" )
__UpperCamelCase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} )
# Log results
__UpperCamelCase = Path(pl_module.hparams.output_dir )
if type_path == "test":
__UpperCamelCase = od / """test_results.txt"""
__UpperCamelCase = od / """test_generations.txt"""
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
__UpperCamelCase = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
__UpperCamelCase = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
results_file.parent.mkdir(exist_ok=lowercase )
generations_file.parent.mkdir(exist_ok=lowercase )
with open(lowercase , """a+""" ) as writer:
for key in sorted(lowercase ):
if key in ["log", "progress_bar", "preds"]:
continue
__UpperCamelCase = metrics[key]
if isinstance(lowercase , torch.Tensor ):
__UpperCamelCase = val.item()
__UpperCamelCase = f"{key}: {val:.6f}\n"
writer.write(lowercase )
if not save_generations:
return
if "preds" in metrics:
__UpperCamelCase = """\n""".join(metrics["""preds"""] )
generations_file.open("""w+""" ).write(lowercase )
@rank_zero_only
def __lowerCamelCase ( self , lowercase , lowercase ) -> str:
try:
__UpperCamelCase = pl_module.model.model.num_parameters()
except AttributeError:
__UpperCamelCase = pl_module.model.num_parameters()
__UpperCamelCase = count_trainable_parameters(lowercase )
# mp stands for million parameters
trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1E6, """grad_mp""": n_trainable_pars / 1E6} )
@rank_zero_only
def __lowerCamelCase ( self , lowercase , lowercase ) -> Optional[Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(lowercase , lowercase , """test""" )
@rank_zero_only
def __lowerCamelCase ( self , lowercase , lowercase ) -> int:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 243 | 0 |
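# The parameter-counting callback above boils down to one reduction over a
# module's parameters. A sketch with a toy model (model choice is mine):
import torch

model = torch.nn.Linear(10, 2)
n_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(n_trainable)  # 10 * 2 weights + 2 biases = 22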
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def _lowercase ( __snake_case ,__snake_case ) -> str | Literal[False]:
__lowerCAmelCase : Optional[int] = list(lowerCAmelCase_ )
__lowerCAmelCase : Optional[Any] = list(lowerCAmelCase_ )
__lowerCAmelCase : Union[str, Any] = 0
for i in range(len(lowerCAmelCase_ ) ):
if lista[i] != lista[i]:
count += 1
__lowerCAmelCase : Optional[int] = '_'
if count > 1:
return False
else:
return "".join(lowerCAmelCase_ )
def _lowercase ( __snake_case ) -> list[str]:
__lowerCAmelCase : Optional[int] = []
while True:
__lowerCAmelCase : Any = ['$'] * len(lowerCAmelCase_ )
__lowerCAmelCase : List[str] = []
for i in range(len(lowerCAmelCase_ ) ):
for j in range(i + 1 ,len(lowerCAmelCase_ ) ):
__lowerCAmelCase : Optional[int] = compare_string(binary[i] ,binary[j] )
if k is False:
__lowerCAmelCase : Optional[Any] = '*'
__lowerCAmelCase : Optional[Any] = '*'
temp.append("X" )
for i in range(len(lowerCAmelCase_ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(lowerCAmelCase_ ) == 0:
return pi
__lowerCAmelCase : Any = list(set(lowerCAmelCase_ ) )
def _lowercase ( __snake_case ,__snake_case ) -> list[str]:
__lowerCAmelCase : int = []
for minterm in minterms:
__lowerCAmelCase : Optional[int] = ''
for _ in range(lowerCAmelCase_ ):
__lowerCAmelCase : Union[str, Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(lowerCAmelCase_ )
return temp
def _lowercase ( __snake_case ,__snake_case ,__snake_case ) -> bool:
__lowerCAmelCase : int = list(lowerCAmelCase_ )
__lowerCAmelCase : Union[str, Any] = list(lowerCAmelCase_ )
__lowerCAmelCase : str = 0
for i in range(len(lowerCAmelCase_ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def _lowercase ( __snake_case ,__snake_case ) -> list[str]:
__lowerCAmelCase : List[Any] = []
__lowerCAmelCase : Optional[Any] = [0] * len(lowerCAmelCase_ )
for i in range(len(chart[0] ) ):
__lowerCAmelCase : Union[str, Any] = 0
__lowerCAmelCase : int = -1
for j in range(len(lowerCAmelCase_ ) ):
if chart[j][i] == 1:
count += 1
__lowerCAmelCase : int = j
if count == 1:
__lowerCAmelCase : List[Any] = 1
for i in range(len(lowerCAmelCase_ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(lowerCAmelCase_ ) ):
__lowerCAmelCase : Any = 0
temp.append(prime_implicants[i] )
while True:
__lowerCAmelCase : Union[str, Any] = 0
__lowerCAmelCase : List[Any] = -1
__lowerCAmelCase : str = 0
for i in range(len(lowerCAmelCase_ ) ):
__lowerCAmelCase : Union[str, Any] = chart[i].count(1 )
if count_n > max_n:
__lowerCAmelCase : Any = count_n
__lowerCAmelCase : int = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(lowerCAmelCase_ ) ):
__lowerCAmelCase : List[str] = 0
def _lowercase ( __snake_case ,__snake_case ) -> list[list[int]]:
__lowerCAmelCase : int = [[0 for x in range(len(lowerCAmelCase_ ) )] for x in range(len(lowerCAmelCase_ ) )]
for i in range(len(lowerCAmelCase_ ) ):
__lowerCAmelCase : str = prime_implicants[i].count("_" )
for j in range(len(lowerCAmelCase_ ) ):
if is_for_table(prime_implicants[i] ,binary[j] ,lowerCAmelCase_ ):
__lowerCAmelCase : Optional[Any] = 1
return chart
def _lowercase ( ) -> None:
__lowerCAmelCase : Optional[int] = int(input("Enter the no. of variables\n" ) )
__lowerCAmelCase : List[Any] = [
int(lowerCAmelCase_ )  # must be int: float minterms would yield "1.0"/"0.0" bit strings below
for x in input(
"Enter the decimal representation of Minterms \'Spaces Separated\'\n" ).split()
]
__lowerCAmelCase : List[str] = decimal_to_binary(lowerCAmelCase_ ,lowerCAmelCase_ )
__lowerCAmelCase : Dict = check(lowerCAmelCase_ )
print("Prime Implicants are:" )
print(lowerCAmelCase_ )
__lowerCAmelCase : List[Any] = prime_implicant_chart(lowerCAmelCase_ ,lowerCAmelCase_ )
__lowerCAmelCase : int = selection(lowerCAmelCase_ ,lowerCAmelCase_ )
print("Essential Prime Implicants are:" )
print(lowerCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 269 |
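# The core step of the Quine-McCluskey code above is compare_string: merge
# two implicants that differ in exactly one bit into one term with a '_'
# wildcard, otherwise report failure. A plainly named sketch (names mine):
def merge_terms(a, b):
    diff = [i for i, (x, y) in enumerate(zip(a, b)) if x != y]
    if len(diff) != 1:
        return None  # not adjacent: zero or more than one differing bit
    i = diff[0]
    return a[:i] + "_" + a[i + 1 :]

assert merge_terms("0110", "0100") == "01_0"
assert merge_terms("0110", "1001") is None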
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> str | Literal[False]:
_a : Optional[int] = list(lowerCAmelCase_ )
_a : Optional[Any] = list(lowerCAmelCase_ )
_a : Union[str, Any] = 0
for i in range(len(lowerCAmelCase_ ) ):
if lista[i] != lista[i]:
count += 1
_a : Optional[int] = '_'
if count > 1:
return False
else:
return "".join(lowerCAmelCase_ )
def __lowerCamelCase ( lowerCAmelCase_ ) -> list[str]:
_a : Optional[int] = []
while True:
_a : Any = ['$'] * len(lowerCAmelCase_ )
_a : List[str] = []
for i in range(len(lowerCAmelCase_ ) ):
for j in range(i + 1 , len(lowerCAmelCase_ ) ):
_a : Optional[int] = compare_string(binary[i] , binary[j] )
if k is False:
_a : Optional[Any] = '*'
_a : Optional[Any] = '*'
temp.append('X' )
for i in range(len(lowerCAmelCase_ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(lowerCAmelCase_ ) == 0:
return pi
_a : Any = list(set(lowerCAmelCase_ ) )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> list[str]:
_a : int = []
for minterm in minterms:
_a : Optional[int] = ''
for _ in range(lowerCAmelCase_ ):
_a : Union[str, Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(lowerCAmelCase_ )
return temp
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> bool:
_a : int = list(lowerCAmelCase_ )
_a : Union[str, Any] = list(lowerCAmelCase_ )
_a : str = 0
for i in range(len(lowerCAmelCase_ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> list[str]:
_a : List[Any] = []
_a : Optional[Any] = [0] * len(lowerCAmelCase_ )
for i in range(len(chart[0] ) ):
_a : Union[str, Any] = 0
_a : int = -1
for j in range(len(lowerCAmelCase_ ) ):
if chart[j][i] == 1:
count += 1
_a : int = j
if count == 1:
_a : List[Any] = 1
for i in range(len(lowerCAmelCase_ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(lowerCAmelCase_ ) ):
_a : Any = 0
temp.append(prime_implicants[i] )
while True:
_a : Union[str, Any] = 0
_a : List[Any] = -1
_a : str = 0
for i in range(len(lowerCAmelCase_ ) ):
_a : Union[str, Any] = chart[i].count(1 )
if count_n > max_n:
_a : Any = count_n
_a : int = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(lowerCAmelCase_ ) ):
_a : List[str] = 0
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> list[list[int]]:
_a : int = [[0 for x in range(len(lowerCAmelCase_ ) )] for x in range(len(lowerCAmelCase_ ) )]
for i in range(len(lowerCAmelCase_ ) ):
_a : str = prime_implicants[i].count('_' )
for j in range(len(lowerCAmelCase_ ) ):
if is_for_table(prime_implicants[i] , binary[j] , lowerCAmelCase_ ):
_a : Optional[Any] = 1
return chart
def __lowerCamelCase ( ) -> None:
_a : Optional[int] = int(input('Enter the no. of variables\n' ) )
_a : List[Any] = [
int(lowerCAmelCase_ )  # must be int: float minterms would yield "1.0"/"0.0" bit strings below
for x in input(
'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split()
]
_a : List[str] = decimal_to_binary(lowerCAmelCase_ , lowerCAmelCase_ )
_a : Dict = check(lowerCAmelCase_ )
print('Prime Implicants are:' )
print(lowerCAmelCase_ )
_a : List[Any] = prime_implicant_chart(lowerCAmelCase_ , lowerCAmelCase_ )
_a : int = selection(lowerCAmelCase_ , lowerCAmelCase_ )
print('Essential Prime Implicants are:' )
print(lowerCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 89 | 0 |
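# After the chart is built, the `selection` routine above takes essential
# implicants and then repeatedly picks the row covering the most uncovered
# minterms. The same greedy set-cover idea in isolation (names and the tiny
# chart are assumptions of mine):
def greedy_cover(chart):
    # chart[i][j] == 1 iff implicant i covers minterm j
    n_minterms = len(chart[0])
    covered, chosen = set(), []
    while len(covered) < n_minterms:
        gains = [
            sum(1 for j in range(n_minterms) if chart[i][j] and j not in covered)
            for i in range(len(chart))
        ]
        best = max(range(len(chart)), key=gains.__getitem__)
        if gains[best] == 0:
            break  # remaining minterms are uncoverable
        chosen.append(best)
        covered.update(j for j in range(n_minterms) if chart[best][j])
    return chosen

assert greedy_cover([[1, 1, 0], [0, 1, 1]]) == [0, 1]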
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase : Union[str, Any] = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[Any] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[Any] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[str] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[Any] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 361 |
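# The import scaffolding above defers heavy submodule imports until first
# attribute access. A sketch of the same idea using plain PEP 562, written
# as it would appear in a hypothetical mypkg/__init__.py (the package name
# and submodule layout are assumptions, not the library's actual API):
import importlib
from typing import TYPE_CHECKING

_import_structure = {"core": ["heavy_function"]}

if TYPE_CHECKING:
    from .core import heavy_function  # static checkers see the real import
else:
    def __getattr__(name):
        # only runs when normal module attribute lookup fails (PEP 562)
        for submodule, names in _import_structure.items():
            if name in names:
                module = importlib.import_module(f".{submodule}", __name__)
                return getattr(module, name)
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")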
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def a ( SCREAMING_SNAKE_CASE_ : dict ):
"""simple docstring"""
return (data["data"], data["target"])
def a ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : np.ndarray ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = XGBRegressor(verbosity=0 , random_state=4_2 )
xgb.fit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Predict target for test data
UpperCamelCase : Any = xgb.predict(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = predictions.reshape(len(SCREAMING_SNAKE_CASE_ ) , 1 )
return predictions
def a ( ):
"""simple docstring"""
UpperCamelCase : Tuple = fetch_california_housing()
UpperCamelCase , UpperCamelCase : Tuple = data_handling(SCREAMING_SNAKE_CASE_ )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = train_test_split(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , test_size=0.25 , random_state=1 )
UpperCamelCase : Optional[Any] = xgboost(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Error printing
print(F"""Mean Absolute Error : {mean_absolute_error(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}""" )
print(F"""Mean Square Error : {mean_squared_error(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 315 | 0 |
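# The two error metrics printed above, computed by hand so the formulas are
# explicit. The toy arrays are assumptions of mine, not the housing data.
import numpy as np

y_true = np.array([3.0, -0.5, 2.0, 7.0])
y_pred = np.array([2.5, 0.0, 2.0, 8.0])
mae = np.mean(np.abs(y_true - y_pred))  # mean absolute error -> 0.5
mse = np.mean((y_true - y_pred) ** 2)   # mean squared error  -> 0.375
print(mae, mse)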
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
__snake_case : List[str] = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
__snake_case : Tuple = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n'
__snake_case : Any = r'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
'''simple docstring'''
def lowercase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , )
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any]=False ) -> int:
'''simple docstring'''
A__ : Optional[Any] =spearmanr(lowerCAmelCase_ , lowerCAmelCase_ )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 134 |
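# Spearman's rho is Pearson correlation applied to ranks. A NumPy sketch
# that assumes no ties (scipy's spearmanr handles ties properly); it
# reproduces the -0.7 from Example 1 of the docstring above.
import numpy as np

def spearman_no_ties(x, y):
    rx = np.argsort(np.argsort(x)).astype(float)  # ranks of x (no ties assumed)
    ry = np.argsort(np.argsort(y)).astype(float)  # ranks of y
    rx -= rx.mean()
    ry -= ry.mean()
    return float(rx @ ry / np.sqrt((rx @ rx) * (ry @ ry)))

print(round(spearman_no_ties([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4]), 2))  # -0.7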
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class lowerCamelCase ( unittest.TestCase , lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : int ) -> Any:
'''simple docstring'''
A__ : int =load_tool("""text-to-speech""" )
self.tool.setup()
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
A__ : List[str] =self.tool("""hey""" )
A__ : Dict =result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485] ) , ) )
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
A__ : Optional[int] =self.tool("""hey""" )
A__ : Tuple =result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485] ) , ) )
| 134 | 1 |
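# The test above pins torch's RNG because SpeechT5 generation is stochastic;
# re-seeding before each call makes successive draws identical. A minimal
# demonstration of that seeding pattern:
import torch

torch.manual_seed(0)
a = torch.randn(3)
torch.manual_seed(0)
b = torch.randn(3)
assert torch.equal(a, b)  # same seed, same tensors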
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def lowerCamelCase__ ( UpperCamelCase__ : int ) -> List[str]:
'''simple docstring'''
_snake_case = VideoMAEConfig()
set_architecture_configs(UpperCamelCase__ , UpperCamelCase__ )
if "finetuned" not in model_name:
_snake_case = False
if "finetuned" in model_name:
_snake_case = 'huggingface/label-files'
if "kinetics" in model_name:
_snake_case = 400
_snake_case = 'kinetics400-id2label.json'
elif "ssv2" in model_name:
_snake_case = 174
_snake_case = 'something-something-v2-id2label.json'
else:
raise ValueError('Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.' )
_snake_case = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) )
_snake_case = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
_snake_case = idalabel
_snake_case = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : Dict ) -> int:
'''simple docstring'''
if "small" in model_name:
_snake_case = 384
_snake_case = 1_536
_snake_case = 12
_snake_case = 16
_snake_case = 12
_snake_case = 3
_snake_case = 192
_snake_case = 768
elif "large" in model_name:
_snake_case = 1_024
_snake_case = 4_096
_snake_case = 24
_snake_case = 16
_snake_case = 12
_snake_case = 8
_snake_case = 512
_snake_case = 2_048
elif "huge" in model_name:
_snake_case = 1_280
_snake_case = 5_120
_snake_case = 32
_snake_case = 16
_snake_case = 12
_snake_case = 8
_snake_case = 640
_snake_case = 2_560
elif "base" not in model_name:
raise ValueError('Model name should include either "small", "base", "large", or "huge"' )
def lowerCamelCase__ ( UpperCamelCase__ : Any ) -> Tuple:
'''simple docstring'''
if "encoder." in name:
_snake_case = name.replace('encoder.' , '' )
if "cls_token" in name:
_snake_case = name.replace('cls_token' , 'videomae.embeddings.cls_token' )
if "decoder_pos_embed" in name:
_snake_case = name.replace('decoder_pos_embed' , 'decoder.decoder_pos_embed' )
if "pos_embed" in name and "decoder" not in name:
_snake_case = name.replace('pos_embed' , 'videomae.embeddings.position_embeddings' )
if "patch_embed.proj" in name:
_snake_case = name.replace('patch_embed.proj' , 'videomae.embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
_snake_case = name.replace('patch_embed.norm' , 'videomae.embeddings.norm' )
if "decoder.blocks" in name:
_snake_case = name.replace('decoder.blocks' , 'decoder.decoder_layers' )
if "blocks" in name:
_snake_case = name.replace('blocks' , 'videomae.encoder.layer' )
if "attn.proj" in name:
_snake_case = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name and "bias" not in name:
_snake_case = name.replace('attn' , 'attention.self' )
if "attn" in name:
_snake_case = name.replace('attn' , 'attention.attention' )
if "norm1" in name:
_snake_case = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
_snake_case = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
_snake_case = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_snake_case = name.replace('mlp.fc2' , 'output.dense' )
if "decoder_embed" in name:
_snake_case = name.replace('decoder_embed' , 'decoder.decoder_embed' )
if "decoder_norm" in name:
_snake_case = name.replace('decoder_norm' , 'decoder.decoder_norm' )
if "decoder_pred" in name:
_snake_case = name.replace('decoder_pred' , 'decoder.decoder_pred' )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
_snake_case = name.replace('norm.weight' , 'videomae.layernorm.weight' )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
_snake_case = name.replace('norm.bias' , 'videomae.layernorm.bias' )
if "head" in name and "decoder" not in name:
_snake_case = name.replace('head' , 'classifier' )
return name
def lowerCamelCase__ ( UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_snake_case = orig_state_dict.pop(UpperCamelCase__ )
if key.startswith('encoder.' ):
_snake_case = key.replace('encoder.' , '' )
if "qkv" in key:
_snake_case = key.split('.' )
if key.startswith('decoder.blocks' ):
_snake_case = config.decoder_hidden_size
_snake_case = int(key_split[2] )
_snake_case = 'decoder.decoder_layers.'
if "weight" in key:
_snake_case = val[:dim, :]
_snake_case = val[dim : dim * 2, :]
_snake_case = val[-dim:, :]
else:
_snake_case = config.hidden_size
_snake_case = int(key_split[1] )
_snake_case = 'videomae.encoder.layer.'
if "weight" in key:
_snake_case = val[:dim, :]
_snake_case = val[dim : dim * 2, :]
_snake_case = val[-dim:, :]
else:
_snake_case = val
return orig_state_dict
def lowerCamelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
_snake_case = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
_snake_case = np.load(UpperCamelCase__ )
return list(UpperCamelCase__ )
def lowerCamelCase__ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
_snake_case = get_videomae_config(UpperCamelCase__ )
if "finetuned" in model_name:
_snake_case = VideoMAEForVideoClassification(UpperCamelCase__ )
else:
_snake_case = VideoMAEForPreTraining(UpperCamelCase__ )
# download original checkpoint, hosted on Google Drive
_snake_case = 'pytorch_model.bin'
gdown.cached_download(UpperCamelCase__ , UpperCamelCase__ , quiet=UpperCamelCase__ )
_snake_case = torch.load(UpperCamelCase__ , map_location='cpu' )
if "model" in files:
_snake_case = files['model']
else:
_snake_case = files['module']
_snake_case = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
model.eval()
# verify model on basic input
_snake_case = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
_snake_case = prepare_video()
_snake_case = image_processor(UpperCamelCase__ , return_tensors='pt' )
if "finetuned" not in model_name:
_snake_case = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
_snake_case = torch.load(UpperCamelCase__ )
_snake_case = model(**UpperCamelCase__ )
_snake_case = outputs.logits
_snake_case = [
'videomae-small-finetuned-kinetics',
'videomae-small-finetuned-ssv2',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'videomae-base-short',
'videomae-base-short-finetuned-kinetics',
'videomae-base',
'videomae-base-finetuned-kinetics',
'videomae-large',
'videomae-large-finetuned-kinetics',
'videomae-huge-finetuned-kinetics',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'videomae-base-short-ssv2',
'videomae-base-short-finetuned-ssv2',
'videomae-base-ssv2',
'videomae-base-finetuned-ssv2',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
_snake_case = torch.Size([1, 174] )
_snake_case = torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
_snake_case = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
_snake_case = torch.Size([1, 174] )
_snake_case = torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
_snake_case = torch.Size([1, 174] )
_snake_case = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(F'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 )
else:
print('Logits:' , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 )
print('Logits ok!' )
# verify loss, if applicable
if model_name == "videomae-base-short":
_snake_case = outputs.loss
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-4 )
print('Loss ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print('Pushing to the hub...' )
model.push_to_hub(UpperCamelCase__ , organization='nielsr' )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
UpperCAmelCase_ = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 360 |
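# The key move in the state-dict conversion above is slicing one fused qkv
# projection into separate q/k/v weights. The same slicing in isolation,
# with a toy hidden size of my choosing:
import torch

dim = 8
qkv_weight = torch.randn(3 * dim, dim)        # rows stacked as [q; k; v]
q_w = qkv_weight[:dim, :]
k_w = qkv_weight[dim : dim * 2, :]
v_w = qkv_weight[-dim:, :]
assert torch.equal(torch.cat([q_w, k_w, v_w]), qkv_weight)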
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class UpperCamelCase_ ( enum.Enum ):
lowerCAmelCase_ = 0
lowerCAmelCase_ = 1
lowerCAmelCase_ = 2
@add_end_docstrings(_lowerCamelCase )
class UpperCamelCase_ ( _lowerCamelCase ):
lowerCAmelCase_ = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Any:
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_snake_case = None
if self.model.config.prefix is not None:
_snake_case = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_snake_case = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_snake_case , _snake_case , _snake_case = self._sanitize_parameters(prefix=lowerCAmelCase_ , **self._forward_params )
_snake_case = {**self._preprocess_params, **preprocess_params}
_snake_case = {**self._forward_params, **forward_params}
def lowerCAmelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> Tuple:
_snake_case = {}
if prefix is not None:
_snake_case = prefix
if prefix:
_snake_case = self.tokenizer(
lowerCAmelCase_ , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
_snake_case = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
' [None, \'hole\']' )
_snake_case = handle_long_generation
preprocess_params.update(lowerCAmelCase_ )
_snake_case = generate_kwargs
_snake_case = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
_snake_case = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
_snake_case = ReturnType.TENSORS
if return_type is not None:
_snake_case = return_type
if clean_up_tokenization_spaces is not None:
_snake_case = clean_up_tokenization_spaces
if stop_sequence is not None:
_snake_case = self.tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
_snake_case = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[str]:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*lowerCAmelCase_ , **lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[Any]:
return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_="" , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Any:
_snake_case = self.tokenizer(
prefix + prompt_text , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=self.framework )
_snake_case = prompt_text
if handle_long_generation == "hole":
_snake_case = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
_snake_case = generate_kwargs['max_new_tokens']
else:
_snake_case = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_snake_case = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
' models max length' )
_snake_case = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
_snake_case = inputs['attention_mask'][:, -keep_length:]
return inputs
def lowerCAmelCase ( self , lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]:
_snake_case = model_inputs['input_ids']
_snake_case = model_inputs.get('attention_mask' , lowerCAmelCase_ )
# Allow empty prompts
if input_ids.shape[1] == 0:
_snake_case = None
_snake_case = None
_snake_case = 1
else:
_snake_case = input_ids.shape[0]
_snake_case = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_snake_case = generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
_snake_case = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
_snake_case = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_snake_case = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_snake_case = self.model.generate(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ )
_snake_case = generated_sequence.shape[0]
if self.framework == "pt":
_snake_case = generated_sequence.reshape(lowerCAmelCase_ , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_snake_case = tf.reshape(lowerCAmelCase_ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=ReturnType.FULL_TEXT , lowerCAmelCase_=True ) -> int:
_snake_case = model_outputs['generated_sequence'][0]
_snake_case = model_outputs['input_ids']
_snake_case = model_outputs['prompt_text']
_snake_case = generated_sequence.numpy().tolist()
_snake_case = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_snake_case = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_snake_case = self.tokenizer.decode(
lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_snake_case = 0
else:
_snake_case = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ , ) )
if return_type == ReturnType.FULL_TEXT:
_snake_case = prompt_text + text[prompt_length:]
else:
_snake_case = text[prompt_length:]
_snake_case = {'generated_text': all_text}
records.append(lowerCAmelCase_ )
return records
| 295 | 0 |
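# A typical call into the pipeline defined above. Loading gpt2 downloads a
# checkpoint, so this is illustrative rather than a test; return_full_text=False
# keeps only the newly generated continuation, mirroring ReturnType.NEW_TEXT.
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
out = generator("Once upon a time", max_new_tokens=20, return_full_text=False)
print(out[0]["generated_text"])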
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowerCamelCase ():
__a , __a : Union[str, Any] = 9, 14 # noqa: F841
__a : Optional[Any] = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
__a : Dict = defaultdict(_SCREAMING_SNAKE_CASE )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
__a : Union[str, Any] = mst(_SCREAMING_SNAKE_CASE )
__a : List[Any] = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
__a : Optional[Any] = tuple(answer[:2] )
__a : Any = tuple(edge[::-1] )
assert edge in result or reverse in result
| 27 |
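# A heap-based sketch of Prim's algorithm over the same kind of adjacency
# mapping the test above builds (node -> list of (neighbor, weight)). The
# three-edge toy graph is my own, chosen so the MST is easy to verify.
import heapq
from collections import defaultdict

def prim(adjacency, start=0):
    seen = {start}
    heap = [(w, start, v) for v, w in adjacency[start]]
    heapq.heapify(heap)
    mst = []
    while heap:
        w, u, v = heapq.heappop(heap)
        if v in seen:
            continue  # stale entry: endpoint already in the tree
        seen.add(v)
        mst.append((u, v, w))
        for nxt, nw in adjacency[v]:
            if nxt not in seen:
                heapq.heappush(heap, (nw, v, nxt))
    return mst

adjacency = defaultdict(list)
for a, b, w in [(0, 1, 4), (0, 2, 1), (1, 2, 2)]:
    adjacency[a].append((b, w))
    adjacency[b].append((a, w))
print(prim(adjacency))  # [(0, 2, 1), (2, 1, 2)]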
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase : Union[str, Any] = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[Any] = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[Any] = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Union[str, Any] = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Dict = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
__lowercase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 27 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCamelCase : Dict = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
UpperCamelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 368 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def A ( ) -> Any:
__UpperCamelCase = {
'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
'content': ['a ' * 2_0, 'a ' * 3_0, 'b ' * 7],
}
__UpperCamelCase = Dataset.from_dict(snake_case )
return dataset
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = get_dataset()
__UpperCamelCase = make_duplicate_clusters(__UpperCAmelCase , 0.8_5 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = get_dataset()
__UpperCamelCase , __UpperCamelCase = deduplicate_dataset(__UpperCAmelCase )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
print(__UpperCAmelCase )
self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 )
self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , __UpperCAmelCase )
| 263 | 0 |
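# MinHash approximates Jaccard similarity over token sets; here is the exact
# quantity for the toy rows used in get_dataset above, to show why the two
# 'a'-repeat files land in one duplicate cluster:
def jaccard(a, b):
    sa, sb = set(a.split()), set(b.split())
    return len(sa & sb) / len(sa | sb)

print(jaccard("a " * 20, "a " * 30))  # 1.0 -- identical token sets collide
print(jaccard("a " * 20, "b " * 7))   # 0.0 -- disjoint token sets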
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase = {
'''configuration_upernet''': ['''UperNetConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''UperNetForSemanticSegmentation''',
'''UperNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 69 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : List[str] = """"""
a_ : Dict = """hf-legacy""" # "hf://"" is reserved for hffs
def __init__( self , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ) ->Optional[int]:
super().__init__(self , **__UpperCAmelCase)
a_ = repo_info
a_ = token
a_ = None
def UpperCAmelCase__ ( self) ->Tuple:
if self.dir_cache is None:
a_ = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
a_ = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(__UpperCAmelCase): {"name": str(__UpperCAmelCase), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
})
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = "rb" , **__UpperCAmelCase , ) ->List[Any]:
if not isinstance(self.repo_info , __UpperCAmelCase):
raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''')
a_ = hf_hub_url(self.repo_info.id , __UpperCAmelCase , revision=self.repo_info.sha)
return fsspec.open(
__UpperCAmelCase , mode=__UpperCAmelCase , headers=get_authentication_headers_for_url(__UpperCAmelCase , use_auth_token=self.token) , client_kwargs={"trust_env": True} , ).open()
def UpperCAmelCase__ ( self , __UpperCAmelCase , **__UpperCAmelCase) ->int:
self._get_dirs()
a_ = self._strip_protocol(__UpperCAmelCase)
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCAmelCase)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase=False , **__UpperCAmelCase) ->List[Any]:
self._get_dirs()
a_ = PurePosixPath(path.strip("/"))
a_ = {}
for p, f in self.dir_cache.items():
a_ = PurePosixPath(p.strip("/"))
a_ = p.parent
if root == path:
a_ = f
a_ = list(paths.values())
if detail:
return out
else:
return sorted(f["name"] for f in out)
| 243 | 0 |
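# The `_get_dirs` cache above synthesizes directory entries from a flat list
# of file paths; PurePosixPath.parents (minus the trailing '.') yields
# exactly the directories that need cache entries. Toy paths are mine:
from pathlib import PurePosixPath

files = ["data/train/part-0.parquet", "data/README.md"]
cache = {f: {"name": f, "type": "file"} for f in files}
for f in files:
    for d in list(PurePosixPath(f).parents)[:-1]:  # drop the '.' root entry
        cache[str(d)] = {"name": str(d), "type": "directory"}
print(sorted(cache))
# ['data', 'data/README.md', 'data/train', 'data/train/part-0.parquet']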
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Optional[Any]:
if not is_accelerate_available():
return method
lowerCamelCase__ : Any = version.parse(accelerate.__version__ ).base_version
if version.parse(UpperCamelCase ) < version.parse("""0.17.0""" ):
return method
def wrapper(self , *UpperCamelCase , **UpperCamelCase ):
if hasattr(self , """_hf_hook""" ) and hasattr(self._hf_hook , """pre_forward""" ):
self._hf_hook.pre_forward(self )
return method(self , *UpperCamelCase , **UpperCamelCase )
return wrapper
| 129 |
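# What the version guard above actually compares: `base_version` strips
# dev/rc suffixes, so an installed "0.17.0.dev0" still counts as 0.17.0
# (a plain parse would rank the dev build below the release).
from packaging import version

assert version.parse("0.16.2") < version.parse("0.17.0")
assert version.parse("0.17.0.dev0") < version.parse("0.17.0")  # dev sorts below release
assert version.parse(version.parse("0.17.0.dev0").base_version) >= version.parse("0.17.0")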
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_A : Optional[Any] =logging.get_logger(__name__)
_A : List[str] ={
'''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class _lowercase ( _lowercase ):
a = """marian"""
a = ["""past_key_values"""]
a = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self: Tuple , UpperCamelCase__: Optional[Any]=58_101 , UpperCamelCase__: Optional[int]=None , UpperCamelCase__: Union[str, Any]=1_024 , UpperCamelCase__: Any=12 , UpperCamelCase__: Optional[int]=4_096 , UpperCamelCase__: Tuple=16 , UpperCamelCase__: Dict=12 , UpperCamelCase__: Optional[Any]=4_096 , UpperCamelCase__: Any=16 , UpperCamelCase__: List[str]=0.0 , UpperCamelCase__: Tuple=0.0 , UpperCamelCase__: str=True , UpperCamelCase__: Optional[int]=True , UpperCamelCase__: Optional[int]="gelu" , UpperCamelCase__: Union[str, Any]=1_024 , UpperCamelCase__: Optional[int]=0.1 , UpperCamelCase__: Optional[Any]=0.0 , UpperCamelCase__: Optional[Any]=0.0 , UpperCamelCase__: Optional[int]=0.02 , UpperCamelCase__: str=58_100 , UpperCamelCase__: Tuple=False , UpperCamelCase__: Optional[Any]=58_100 , UpperCamelCase__: int=0 , UpperCamelCase__: Union[str, Any]=0 , UpperCamelCase__: List[str]=True , **UpperCamelCase__: str , ):
lowerCamelCase__ : int = vocab_size
lowerCamelCase__ : Tuple = decoder_vocab_size or vocab_size
lowerCamelCase__ : List[str] = max_position_embeddings
lowerCamelCase__ : Optional[Any] = d_model
lowerCamelCase__ : int = encoder_ffn_dim
lowerCamelCase__ : Union[str, Any] = encoder_layers
lowerCamelCase__ : Dict = encoder_attention_heads
lowerCamelCase__ : Optional[int] = decoder_ffn_dim
lowerCamelCase__ : List[str] = decoder_layers
lowerCamelCase__ : Dict = decoder_attention_heads
lowerCamelCase__ : int = dropout
lowerCamelCase__ : str = attention_dropout
lowerCamelCase__ : Dict = activation_dropout
lowerCamelCase__ : List[str] = activation_function
lowerCamelCase__ : Union[str, Any] = init_std
lowerCamelCase__ : str = encoder_layerdrop
lowerCamelCase__ : Any = decoder_layerdrop
lowerCamelCase__ : List[str] = use_cache
lowerCamelCase__ : List[str] = encoder_layers
lowerCamelCase__ : int = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCamelCase__ : str = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , forced_eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
class _lowercase ( _lowercase ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowerCamelCase_ ( self: Union[str, Any] ):
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : List[str] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
lowerCamelCase__ : Dict = {0: """batch"""}
lowerCamelCase__ : Union[str, Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
lowerCamelCase__ : Any = {0: """batch""", 1: """decoder_sequence"""}
lowerCamelCase__ : Dict = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowerCamelCase__ : Union[str, Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers
for i in range(UpperCamelCase__ ):
lowerCamelCase__ : Union[str, Any] = {0: """batch""", 2: """past_sequence + sequence"""}
lowerCamelCase__ : List[str] = {0: """batch""", 2: """past_sequence + sequence"""}
else:
lowerCamelCase__ : int = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def lowerCamelCase_ ( self: Optional[Any] ):
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Dict = super().outputs
else:
lowerCamelCase__ : Any = super(UpperCamelCase__ , self ).outputs
if self.use_past:
lowerCamelCase__ , lowerCamelCase__ : str = self.num_layers
for i in range(UpperCamelCase__ ):
lowerCamelCase__ : Tuple = {0: """batch""", 2: """past_sequence + sequence"""}
lowerCamelCase__ : Union[str, Any] = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def lowerCamelCase_ ( self: str , UpperCamelCase__: PreTrainedTokenizer , UpperCamelCase__: int = -1 , UpperCamelCase__: int = -1 , UpperCamelCase__: bool = False , UpperCamelCase__: Optional[TensorType] = None , ):
lowerCamelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_encoder_and_decoder(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Generate decoder inputs
lowerCamelCase__ : Any = seq_length if not self.use_past else 1
lowerCamelCase__ : Optional[Any] = self._generate_dummy_inputs_for_encoder_and_decoder(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : str = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
lowerCamelCase__ : Optional[int] = dict(**UpperCamelCase__ , **UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = common_inputs["""input_ids"""].shape
lowerCamelCase__ : Tuple = common_inputs["""decoder_input_ids"""].shape[1]
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.num_attention_heads
lowerCamelCase__ : Any = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCamelCase__ : Tuple = decoder_seq_length + 3
lowerCamelCase__ : int = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowerCamelCase__ : Optional[int] = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(UpperCamelCase__ , UpperCamelCase__ )] , dim=1 )
lowerCamelCase__ : Any = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowerCamelCase__ , lowerCamelCase__ : Any = self.num_layers
lowerCamelCase__ : str = min(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : str = max(UpperCamelCase__ , UpperCamelCase__ ) - min_num_layers
lowerCamelCase__ : int = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(UpperCamelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
) )
# TODO: test this.
lowerCamelCase__ : Union[str, Any] = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(UpperCamelCase__ , UpperCamelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) )
return common_inputs
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: PreTrainedTokenizer , UpperCamelCase__: int = -1 , UpperCamelCase__: int = -1 , UpperCamelCase__: bool = False , UpperCamelCase__: Optional[TensorType] = None , ):
lowerCamelCase__ : Any = self._generate_dummy_inputs_for_encoder_and_decoder(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCamelCase__ , lowerCamelCase__ : Any = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowerCamelCase__ : Optional[Any] = seqlen + 2
lowerCamelCase__ , lowerCamelCase__ : Dict = self.num_layers
lowerCamelCase__ , lowerCamelCase__ : Dict = self.num_attention_heads
lowerCamelCase__ : Optional[Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCamelCase__ : Optional[Any] = common_inputs["""attention_mask"""].dtype
lowerCamelCase__ : int = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )
lowerCamelCase__ : int = [
(torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(UpperCamelCase__ )
]
return common_inputs
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: PreTrainedTokenizer , UpperCamelCase__: int = -1 , UpperCamelCase__: int = -1 , UpperCamelCase__: bool = False , UpperCamelCase__: Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCamelCase__ : List[Any] = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCamelCase__ : Union[str, Any] = tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
lowerCamelCase__ : Any = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
lowerCamelCase__ : Union[str, Any] = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowerCamelCase__ : str = dict(tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ ) )
return common_inputs
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: PreTrainedTokenizer , UpperCamelCase__: int = -1 , UpperCamelCase__: int = -1 , UpperCamelCase__: bool = False , UpperCamelCase__: Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
else:
lowerCamelCase__ : Tuple = self._generate_dummy_inputs_for_causal_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
return common_inputs
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict , UpperCamelCase__: Optional[Any] ):
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Dict = super()._flatten_past_key_values_(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
lowerCamelCase__ : List[Any] = super(UpperCamelCase__ , self )._flatten_past_key_values_(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
@property
def lowerCamelCase_ ( self: Union[str, Any] ):
return 1e-4
| 129 | 1 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__A = logging.get_logger(__name__)
def lowerCAmelCase_ ( __a , __a , __a , __a ) -> Tuple[int, int]:
"""simple docstring"""
def constraint_to_multiple_of(__a , __a , __a=0 , __a=None ):
lowerCamelCase__: str =round(val / multiple ) * multiple
if max_val is not None and x > max_val:
lowerCamelCase__: Dict =math.floor(val / multiple ) * multiple
if x < min_val:
lowerCamelCase__: Optional[Any] =math.ceil(val / multiple ) * multiple
return x
lowerCamelCase__: str =(output_size, output_size) if isinstance(_snake_case , _snake_case ) else output_size
lowerCamelCase__ , lowerCamelCase__: Optional[Any] =get_image_size(_snake_case )
lowerCamelCase__ , lowerCamelCase__: Tuple =output_size
# determine new height and width
lowerCamelCase__: List[Any] =output_height / input_height
lowerCamelCase__: Optional[int] =output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
lowerCamelCase__: List[Any] =scale_width
else:
# fit height
lowerCamelCase__: List[Any] =scale_height
lowerCamelCase__: List[str] =constraint_to_multiple_of(scale_height * input_height , multiple=_snake_case )
lowerCamelCase__: Dict =constraint_to_multiple_of(scale_width * input_width , multiple=_snake_case )
return (new_height, new_width)
class _SCREAMING_SNAKE_CASE ( __lowerCAmelCase ):
'''simple docstring'''
lowercase_ = ['''pixel_values''']
def __init__(self : str , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 255 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase_ : Union[str, Any] , ) ->int:
'''simple docstring'''
super().__init__(**_UpperCAmelCase)
lowerCamelCase__: int =size if size is not None else {"height": 384, "width": 384}
lowerCamelCase__: Optional[Any] =get_size_dict(_UpperCAmelCase)
lowerCamelCase__: List[Any] =do_resize
lowerCamelCase__: Union[str, Any] =size
lowerCamelCase__: Any =keep_aspect_ratio
lowerCamelCase__: List[Any] =ensure_multiple_of
lowerCamelCase__: Optional[Any] =resample
lowerCamelCase__: Optional[int] =do_rescale
lowerCamelCase__: Optional[int] =rescale_factor
lowerCamelCase__: Tuple =do_normalize
lowerCamelCase__: Any =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCamelCase__: int =image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : List[Any] , ) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Dict =get_size_dict(_UpperCAmelCase)
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}""")
lowerCamelCase__: int =get_resize_output_image_size(
_UpperCAmelCase , output_size=(size["height"], size["width"]) , keep_aspect_ratio=_UpperCAmelCase , multiple=_UpperCAmelCase , )
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[int, float] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Any , ) ->str:
'''simple docstring'''
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : List[str] , ) ->Union[str, Any]:
'''simple docstring'''
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : int = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : int = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : float = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase_ : Any , ) ->Dict:
'''simple docstring'''
lowerCamelCase__: Optional[int] =do_resize if do_resize is not None else self.do_resize
lowerCamelCase__: int =size if size is not None else self.size
lowerCamelCase__: Tuple =get_size_dict(_UpperCAmelCase)
lowerCamelCase__: List[Any] =keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
lowerCamelCase__: Optional[Any] =ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
lowerCamelCase__: str =resample if resample is not None else self.resample
lowerCamelCase__: Optional[int] =do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase__: List[str] =rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase__: List[Any] =do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase__: Union[str, Any] =image_mean if image_mean is not None else self.image_mean
lowerCamelCase__: List[Any] =image_std if image_std is not None else self.image_std
lowerCamelCase__: int =make_list_of_images(_UpperCAmelCase)
if not valid_images(_UpperCAmelCase):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True.")
# All transformations expect numpy arrays.
lowerCamelCase__: Optional[int] =[to_numpy_array(_UpperCAmelCase) for image in images]
if do_resize:
lowerCamelCase__: Tuple =[self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase) for image in images]
if do_rescale:
lowerCamelCase__: List[Any] =[self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase) for image in images]
if do_normalize:
lowerCamelCase__: Dict =[self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase) for image in images]
lowerCamelCase__: Optional[int] =[to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase) for image in images]
lowerCamelCase__: Optional[int] ={"pixel_values": images}
return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase)
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Tuple] = None) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_UpperCAmelCase) != len(_UpperCAmelCase):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits")
if is_torch_tensor(_UpperCAmelCase):
lowerCamelCase__: str =target_sizes.numpy()
lowerCamelCase__: int =[]
for idx in range(len(_UpperCAmelCase)):
lowerCamelCase__: List[Any] =torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode="bilinear" , align_corners=_UpperCAmelCase)
lowerCamelCase__: Tuple =resized_logits[0].argmax(dim=0)
semantic_segmentation.append(_UpperCAmelCase)
else:
lowerCamelCase__: Union[str, Any] =logits.argmax(dim=1)
lowerCamelCase__: str =[semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
| 10 |
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : int , *_UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str=None , **_UpperCAmelCase : List[Any] ):
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
_A = eval_examples
_A = post_process_function
def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : str = "eval" ):
_A = self.eval_dataset if eval_dataset is None else eval_dataset
_A = self.get_eval_dataloader(_UpperCAmelCase )
_A = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_A = self.compute_metrics
_A = None
_A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
_A = time.time()
try:
_A = eval_loop(
_UpperCAmelCase , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , )
finally:
_A = compute_metrics
_A = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
_UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
_A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions )
_A = self.compute_metrics(_UpperCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
_A = metrics.pop(_UpperCAmelCase )
metrics.update(output.metrics )
else:
_A = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(_UpperCAmelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_A = self.callback_handler.on_evaluate(self.args , self.state , self.control , _UpperCAmelCase )
return metrics
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str = "test" ):
_A = self.get_test_dataloader(_UpperCAmelCase )
# Temporarily disable metric computation, we will do it in the loop here.
_A = self.compute_metrics
_A = None
_A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
_A = time.time()
try:
_A = eval_loop(
_UpperCAmelCase , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , )
finally:
_A = compute_metrics
_A = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
_UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
_A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions , 'predict' )
_A = self.compute_metrics(_UpperCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
_A = metrics.pop(_UpperCAmelCase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_UpperCAmelCase )
| 315 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ : Any = logging.get_logger(__name__)
A_ : Optional[int] = {'''vocab_file''': '''sentencepiece.bpe.model'''}
A_ : str = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
}
A_ : List[Any] = {
'''moussaKam/mbarthez''': 1_0_2_4,
'''moussaKam/barthez''': 1_0_2_4,
'''moussaKam/barthez-orangesum-title''': 1_0_2_4,
}
A_ : List[str] = '''▁'''
class lowercase ( _a ):
"""simple docstring"""
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = ["""input_ids""", """attention_mask"""]
def __init__( self ,a_ ,a_="<s>" ,a_="</s>" ,a_="</s>" ,a_="<s>" ,a_="<unk>" ,a_="<pad>" ,a_="<mask>" ,a_ = None ,**a_ ,) -> Union[str, Any]:
# Mask token behave like a normal word, i.e. include the space before it
_UpperCAmelCase : int = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else mask_token
_UpperCAmelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCamelCase ,eos_token=__lowerCamelCase ,unk_token=__lowerCamelCase ,sep_token=__lowerCamelCase ,cls_token=__lowerCamelCase ,pad_token=__lowerCamelCase ,mask_token=__lowerCamelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__lowerCamelCase ,)
_UpperCAmelCase : Union[str, Any] = vocab_file
_UpperCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowerCamelCase ) )
_UpperCAmelCase : Optional[int] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
_UpperCAmelCase : Tuple = len(self.sp_model ) - 1
_UpperCAmelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def _snake_case ( self ,a_ ,a_ = None ) -> Union[str, Any]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase : Any = [self.cls_token_id]
_UpperCAmelCase : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self ,a_ ,a_ = None ,a_ = False ) -> List[Any]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase ,token_ids_a=__lowerCamelCase ,already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def _snake_case ( self ,a_ ,a_ = None ) -> Dict:
_UpperCAmelCase : Any = [self.sep_token_id]
_UpperCAmelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _snake_case ( self ) -> str:
return len(self.sp_model )
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Optional[int] = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self ,a_ ) -> Any:
return self.sp_model.encode(__lowerCamelCase ,out_type=__lowerCamelCase )
def _snake_case ( self ,a_ ) -> List[str]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase : List[Any] = self.sp_model.PieceToId(__lowerCamelCase )
return spm_id if spm_id else self.unk_token_id
def _snake_case ( self ,a_ ) -> Dict:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(__lowerCamelCase )
def _snake_case ( self ,a_ ) -> List[str]:
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : str = """"""
_UpperCAmelCase : Optional[int] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowerCamelCase ) + token
_UpperCAmelCase : List[str] = True
_UpperCAmelCase : Dict = []
else:
current_sub_tokens.append(__lowerCamelCase )
_UpperCAmelCase : Optional[Any] = False
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
def __getstate__( self ) -> List[Any]:
_UpperCAmelCase : Tuple = self.__dict__.copy()
_UpperCAmelCase : str = None
return state
def __setstate__( self ,a_ ) -> Optional[int]:
_UpperCAmelCase : Any = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
_UpperCAmelCase : Any = {}
_UpperCAmelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _snake_case ( self ,a_ ,a_ = None ) -> Optional[Any]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCAmelCase : Union[str, Any] = os.path.join(
__lowerCamelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,__lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase ,"""wb""" ) as fi:
_UpperCAmelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
| 368 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : List[Any] = logging.get_logger(__name__)
A_ : Union[str, Any] = {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = """roformer"""
def __init__( self ,a_=50_000 ,a_=None ,a_=768 ,a_=12 ,a_=12 ,a_=3_072 ,a_="gelu" ,a_=0.1 ,a_=0.1 ,a_=1_536 ,a_=2 ,a_=0.02 ,a_=1E-1_2 ,a_=0 ,a_=False ,a_=True ,**a_ ,) -> Tuple:
super().__init__(pad_token_id=a_ ,**a_ )
_UpperCAmelCase : List[Any] = vocab_size
_UpperCAmelCase : str = hidden_size if embedding_size is None else embedding_size
_UpperCAmelCase : List[Any] = hidden_size
_UpperCAmelCase : str = num_hidden_layers
_UpperCAmelCase : Optional[Any] = num_attention_heads
_UpperCAmelCase : Optional[Any] = hidden_act
_UpperCAmelCase : str = intermediate_size
_UpperCAmelCase : Optional[Any] = hidden_dropout_prob
_UpperCAmelCase : Any = attention_probs_dropout_prob
_UpperCAmelCase : Optional[int] = max_position_embeddings
_UpperCAmelCase : Any = type_vocab_size
_UpperCAmelCase : Tuple = initializer_range
_UpperCAmelCase : Dict = layer_norm_eps
_UpperCAmelCase : Optional[int] = rotary_value
_UpperCAmelCase : Any = use_cache
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCAmelCase : Optional[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_UpperCAmelCase : List[Any] = {0: """batch""", 1: """sequence"""}
_UpperCAmelCase : Tuple = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 349 | 0 |
'''simple docstring'''
from itertools import permutations
def __lowerCamelCase ( __snake_case : Tuple ) -> bool:
"""simple docstring"""
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
A__ : Union[str, Any] =[7, 11, 13, 17]
for i, test in enumerate(lowercase__ ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def __lowerCamelCase ( __snake_case : List[Any] = 10 ) -> int:
"""simple docstring"""
return sum(
int("""""".join(map(lowercase__, lowercase__ ) ) )
for num in permutations(range(lowercase__ ) )
if is_substring_divisible(lowercase__ ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 134 |
from math import isqrt
def _lowerCamelCase( lowercase__ ) -> bool:
'''simple docstring'''
return all(number % divisor != 0 for divisor in range(2 , isqrt(lowercase__ ) + 1 ) )
def _lowerCamelCase( lowercase__ = 1_0**6 ) -> int:
'''simple docstring'''
__lowercase= 0
__lowercase= 1
__lowercase= 7
while prime_candidate < max_prime:
primes_count += is_prime(lowercase__ )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F'{solution() = }')
| 295 | 0 |
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowercase ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] ) -> List[Any]:
# Load checkpoint
__a = torch.load(lowerCAmelCase__ , map_location='''cpu''' )
__a = chkpt['''model''']
# We have the base model one level deeper than the original XLM repository
__a = {}
for k, v in state_dict.items():
if "pred_layer" in k:
__a = v
else:
__a = v
__a = chkpt['''params''']
__a = {n: v for n, v in config.items() if not isinstance(lowerCAmelCase__ , (torch.FloatTensor, numpy.ndarray) )}
__a = chkpt['''dico_word2id''']
__a = {s + '''</w>''' if s.find('''@@''' ) == -1 and i > 13 else s.replace('''@@''' , '''''' ): i for s, i in vocab.items()}
# Save pytorch-model
__a = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
__a = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
__a = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''vocab_file''']
print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(lowerCAmelCase__ , lowerCAmelCase__ )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(lowerCAmelCase__ , indent=2 ) + '''\n''' )
print(f'''Save vocab file to {pytorch_config_dump_path}''' )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(lowerCAmelCase__ , indent=2 ) + '''\n''' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 364 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = ['image_processor', 'tokenizer']
__UpperCAmelCase : str = 'LayoutLMv3ImageProcessor'
__UpperCAmelCase : Optional[int] = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self , _a=None , _a=None , **_a ):
__a = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
__a = kwargs.pop('''feature_extractor''' )
__a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
def __call__( self , _a , _a = None , _a = None , _a = None , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
# first, apply the image processor
__a = self.image_processor(images=_a , return_tensors=_a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_a , _a ):
__a = [text] # add batch dimension (as the image processor always adds a batch dimension)
__a = features['''words''']
__a = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
# add pixel values
__a = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
__a = self.get_overflowing_images(_a , encoded_inputs['''overflow_to_sample_mapping'''] )
__a = images
return encoded_inputs
def __UpperCAmelCase ( self , _a , _a ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
__a = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_a ) != len(_a ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(_a )} and {len(_a )}''' )
return images_with_overflow
def __UpperCAmelCase ( self , *_a , **_a ):
return self.tokenizer.batch_decode(*_a , **_a )
def __UpperCAmelCase ( self , *_a , **_a ):
return self.tokenizer.decode(*_a , **_a )
@property
def __UpperCAmelCase ( self ):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def __UpperCAmelCase ( self ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def __UpperCAmelCase ( self ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
| 11 | 0 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class snake_case :
"""simple docstring"""
@staticmethod
def snake_case ( *UpperCamelCase , **UpperCamelCase ):
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_torch
class snake_case ( unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
lowerCamelCase_ = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
def snake_case ( self , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = object_detector(examples[0] , threshold=0.0 )
lowerCamelCase_ = len(UpperCamelCase )
self.assertGreater(UpperCamelCase , 0 )
self.assertEqual(
UpperCamelCase , [
{
"score": ANY(UpperCamelCase ),
"label": ANY(UpperCamelCase ),
"box": {"xmin": ANY(UpperCamelCase ), "ymin": ANY(UpperCamelCase ), "xmax": ANY(UpperCamelCase ), "ymax": ANY(UpperCamelCase )},
}
for i in range(UpperCamelCase )
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def snake_case ( self ):
"""simple docstring"""
pass
@require_torch
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
lowerCamelCase_ = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.7_235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7_218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7_184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6_748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6_419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
] , )
lowerCamelCase_ = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
[
{"score": 0.7_235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7_218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7_184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6_748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6_419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
]
] , )
@require_torch
@slow
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = pipeline("zero-shot-object-detection" )
lowerCamelCase_ = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1_474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1_208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
] , )
lowerCamelCase_ = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
[
{"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1_474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1_208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
[
{"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1_474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1_208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def snake_case ( self ):
"""simple docstring"""
pass
@require_torch
@slow
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = 0.2
lowerCamelCase_ = pipeline("zero-shot-object-detection" )
lowerCamelCase_ = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=UpperCamelCase , )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
] , )
@require_torch
@slow
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = 2
lowerCamelCase_ = pipeline("zero-shot-object-detection" )
lowerCamelCase_ = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=UpperCamelCase , )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
] , )
| 55 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ):
# Load configuration defined in the metadata file
with open(UpperCamelCase__ ) as metadata_file:
_UpperCAmelCase : Dict = json.load(UpperCamelCase__ )
_UpperCAmelCase : List[Any] = LukeConfig(use_entity_aware_attention=UpperCamelCase__ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_UpperCAmelCase : List[Any] = torch.load(UpperCamelCase__ , map_location='''cpu''' )
# Load the entity vocab file
_UpperCAmelCase : Optional[int] = load_entity_vocab(UpperCamelCase__ )
_UpperCAmelCase : Optional[int] = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCAmelCase : int = AddedToken('''<ent>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = AddedToken('''<ent2>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase : Any = LukeTokenizer.from_pretrained(UpperCamelCase__ )
# Initialize the embeddings of the special tokens
_UpperCAmelCase : str = state_dict['''embeddings.word_embeddings.weight''']
_UpperCAmelCase : Dict = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
_UpperCAmelCase : Union[str, Any] = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
_UpperCAmelCase : Tuple = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCAmelCase : List[Any] = F'encoder.layer.{layer_index}.attention.self.'
_UpperCAmelCase : Optional[Any] = state_dict[prefix + matrix_name]
_UpperCAmelCase : Tuple = state_dict[prefix + matrix_name]
_UpperCAmelCase : str = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCAmelCase : Any = state_dict['''entity_embeddings.entity_embeddings.weight''']
_UpperCAmelCase : Dict = entity_emb[entity_vocab['''[MASK]''']]
_UpperCAmelCase : Optional[int] = LukeModel(config=UpperCamelCase__ ).eval()
_UpperCAmelCase , _UpperCAmelCase : int = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
if not (len(UpperCamelCase__ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F'Missing keys {", ".join(UpperCamelCase__ )}. Expected only missing embeddings.position_ids' )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
F' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
# Check outputs
_UpperCAmelCase : Optional[int] = LukeTokenizer.from_pretrained(UpperCamelCase__ , task='''entity_classification''' )
_UpperCAmelCase : List[str] = (
'''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
_UpperCAmelCase : Dict = (39, 42)
_UpperCAmelCase : Any = tokenizer(UpperCamelCase__ , entity_spans=[span] , add_prefix_space=UpperCamelCase__ , return_tensors='''pt''' )
_UpperCAmelCase : List[Any] = model(**UpperCamelCase__ )
# Verify word hidden states
if model_size == "large":
_UpperCAmelCase : str = torch.Size((1, 42, 1024) )
_UpperCAmelCase : Union[str, Any] = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
_UpperCAmelCase : Optional[Any] = torch.Size((1, 42, 768) )
_UpperCAmelCase : str = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_UpperCAmelCase : int = torch.Size((1, 1, 1024) )
_UpperCAmelCase : str = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
_UpperCAmelCase : List[str] = torch.Size((1, 1, 768) )
_UpperCAmelCase : List[Any] = torch.tensor([[0.1457, 0.1044, 0.0174]] )
if not (outputs.entity_last_hidden_state.shape != expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(UpperCamelCase__ ) )
model.save_pretrained(UpperCamelCase__ )
def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] ):
_UpperCAmelCase : Any = {}
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(UpperCamelCase__ ):
_UpperCAmelCase , _UpperCAmelCase : Any = line.rstrip().split('''\t''' )
_UpperCAmelCase : Tuple = index
return entity_vocab
if __name__ == "__main__":
_lowerCAmelCase :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
_lowerCAmelCase :Any = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 263 | 0 |
import re
def UpperCamelCase ( _A ):
"""simple docstring"""
__magic_name__ : Union[str, Any] = re.compile(
R"""^(?:0|94|\+94|0{2}94)""" R"""7(0|1|2|4|5|6|7|8)""" R"""(-| |)""" R"""\d{7}$""" )
return bool(re.search(_A, _A ) )
if __name__ == "__main__":
__magic_name__: Optional[Any] = "0094702343221"
print(is_sri_lankan_phone_number(phone))
| 138 |
def UpperCamelCase ( _A = 1, _A = 1000 ):
"""simple docstring"""
__magic_name__ : Optional[int] = 1
__magic_name__ : Dict = 0
for divide_by_number in range(_A, digit + 1 ):
__magic_name__ : list[int] = []
__magic_name__ : Any = numerator
for _ in range(1, digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(_A ):
__magic_name__ : int = len(_A )
__magic_name__ : Dict = divide_by_number
else:
has_been_divided.append(_A )
__magic_name__ : Optional[int] = now_divide * 10 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 138 | 1 |
from torch import nn
def lowerCAmelCase__ ( lowerCamelCase_ : Optional[int]):
'''simple docstring'''
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f"""Unsupported activation function: {act_fn}""")
| 129 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case : Dict =logging.get_logger(__name__)
__snake_case : Optional[int] ={
'facebook/xlm-roberta-xl': 'https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json',
'facebook/xlm-roberta-xxl': 'https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class lowerCamelCase__ ( lowerCamelCase__):
'''simple docstring'''
snake_case_ ="""xlm-roberta-xl"""
def __init__(self ,__lowerCamelCase=25_08_80 ,__lowerCamelCase=25_60 ,__lowerCamelCase=36 ,__lowerCamelCase=32 ,__lowerCamelCase=1_02_40 ,__lowerCamelCase="gelu" ,__lowerCamelCase=0.1 ,__lowerCamelCase=0.1 ,__lowerCamelCase=5_14 ,__lowerCamelCase=1 ,__lowerCamelCase=0.02 ,__lowerCamelCase=1e-05 ,__lowerCamelCase=1 ,__lowerCamelCase=0 ,__lowerCamelCase=2 ,__lowerCamelCase="absolute" ,__lowerCamelCase=True ,__lowerCamelCase=None ,**__lowerCamelCase ,) -> Optional[Any]:
"""simple docstring"""
super().__init__(pad_token_id=__lowerCamelCase ,bos_token_id=__lowerCamelCase ,eos_token_id=__lowerCamelCase ,**__lowerCamelCase )
lowerCAmelCase__ : int = vocab_size
lowerCAmelCase__ : Tuple = hidden_size
lowerCAmelCase__ : int = num_hidden_layers
lowerCAmelCase__ : int = num_attention_heads
lowerCAmelCase__ : int = hidden_act
lowerCAmelCase__ : Tuple = intermediate_size
lowerCAmelCase__ : Optional[int] = hidden_dropout_prob
lowerCAmelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase__ : Optional[Any] = max_position_embeddings
lowerCAmelCase__ : List[str] = type_vocab_size
lowerCAmelCase__ : List[str] = initializer_range
lowerCAmelCase__ : Any = layer_norm_eps
lowerCAmelCase__ : Union[str, Any] = position_embedding_type
lowerCAmelCase__ : Union[str, Any] = use_cache
lowerCAmelCase__ : str = classifier_dropout
class lowerCamelCase__ ( lowerCamelCase__):
'''simple docstring'''
@property
def lowerCAmelCase__ (self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
lowerCAmelCase__ : Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCAmelCase__ : Any = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 129 | 1 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def _UpperCAmelCase ( _UpperCamelCase : list[float] ) -> List[Any]:
return np.maximum(0, __UpperCamelCase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 368 |
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
__snake_case : Any = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
__snake_case : Dict = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
__snake_case : Optional[int] = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def __A ( self ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/ROUGE_(metric)''',
'''https://github.com/google-research/google-research/tree/master/rouge''',
] , )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False ) -> Optional[int]:
if rouge_types is None:
A_ = ['''rouge1''', '''rouge2''', '''rougeL''', '''rougeLsum''']
A_ = rouge_scorer.RougeScorer(rouge_types=_SCREAMING_SNAKE_CASE , use_stemmer=_SCREAMING_SNAKE_CASE )
if use_aggregator:
A_ = scoring.BootstrapAggregator()
else:
A_ = []
for ref, pred in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
A_ = scorer.score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if use_aggregator:
aggregator.add_scores(_SCREAMING_SNAKE_CASE )
else:
scores.append(_SCREAMING_SNAKE_CASE )
if use_aggregator:
A_ = aggregator.aggregate()
else:
A_ = {}
for key in scores[0]:
A_ = [score[key] for score in scores]
return result
| 18 | 0 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
_A = logging.get_logger(__name__)
class UpperCAmelCase__ :
"""simple docstring"""
UpperCAmelCase__ : str
UpperCAmelCase__ : str = None
@staticmethod
def _a ( ) -> Optional[int]:
raise NotImplementedError
def _a ( self , A_ , A_ , A_ , **A_ ) -> Dict:
raise NotImplementedError
def _a ( self , A_ ) -> List[Any]:
raise NotImplementedError
def _a ( self ) -> str:
if not self.is_available():
raise RuntimeError(
f'You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.' )
@classmethod
def _a ( cls ) -> int:
return f'`pip install {cls.pip_package or cls.name}`'
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Dict = "optuna"
@staticmethod
def _a ( ) -> Union[str, Any]:
return is_optuna_available()
def _a ( self , A_ , A_ , A_ , **A_ ) -> List[Any]:
return run_hp_search_optuna(A_ , A_ , A_ , **A_ )
def _a ( self , A_ ) -> Dict:
return default_hp_space_optuna(A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = "ray"
UpperCAmelCase__ : str = "'ray[tune]'"
@staticmethod
def _a ( ) -> str:
return is_ray_available()
def _a ( self , A_ , A_ , A_ , **A_ ) -> str:
return run_hp_search_ray(A_ , A_ , A_ , **A_ )
def _a ( self , A_ ) -> List[Any]:
return default_hp_space_ray(A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Any = "sigopt"
@staticmethod
def _a ( ) -> Tuple:
return is_sigopt_available()
def _a ( self , A_ , A_ , A_ , **A_ ) -> int:
return run_hp_search_sigopt(A_ , A_ , A_ , **A_ )
def _a ( self , A_ ) -> Union[str, Any]:
return default_hp_space_sigopt(A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = "wandb"
@staticmethod
def _a ( ) -> Any:
return is_wandb_available()
def _a ( self , A_ , A_ , A_ , **A_ ) -> Optional[Any]:
return run_hp_search_wandb(A_ , A_ , A_ , **A_ )
def _a ( self , A_ ) -> List[str]:
return default_hp_space_wandb(A_ )
_A = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def _UpperCAmelCase ( ):
__UpperCamelCase =[backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(SCREAMING_SNAKE_CASE__ ) > 0:
__UpperCamelCase =available_backends[0].name
if len(SCREAMING_SNAKE_CASE__ ) > 1:
logger.info(
F'{len(SCREAMING_SNAKE_CASE__ )} hyperparameter search backends available. Using {name} as the default.' )
return name
raise RuntimeError(
'No hyperparameter search backend available.\n'
+ '\n'.join(
F' - To install {backend.name} run {backend.pip_install()}'
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 62 |
'''simple docstring'''
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
a__ : Any = get_logger(__name__)
class UpperCAmelCase__ :
def __init__( self , lowercase = None ) -> List[str]:
__UpperCamelCase = (
os.path.join(lowercase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
__UpperCamelCase = Extractor
def __lowerCamelCase ( self , lowercase ) -> str:
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
__UpperCamelCase = os.path.abspath(lowercase )
return os.path.join(self.extract_dir , hash_url_to_filename(lowercase ) )
def __lowerCamelCase ( self , lowercase , lowercase ) -> bool:
return force_extract or (
not os.path.isfile(lowercase ) and not (os.path.isdir(lowercase ) and os.listdir(lowercase ))
)
def __lowerCamelCase ( self , lowercase , lowercase = False ) -> str:
__UpperCamelCase = self.extractor.infer_extractor_format(lowercase )
if not extractor_format:
return input_path
__UpperCamelCase = self._get_output_path(lowercase )
if self._do_extract(lowercase , lowercase ):
self.extractor.extract(lowercase , lowercase , lowercase )
return output_path
class UpperCAmelCase__ ( UpperCAmelCase_):
@classmethod
@abstractmethod
def __lowerCamelCase ( cls , lowercase , **lowercase ) -> bool:
...
@staticmethod
@abstractmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
...
class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = []
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> int:
with open(lowercase , """rb""" ) as f:
return f.read(lowercase )
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase = b"" ) -> bool:
if not magic_number:
__UpperCamelCase = max(len(lowercase ) for cls_magic_number in cls.magic_numbers )
try:
__UpperCamelCase = cls.read_magic_number(lowercase , lowercase )
except OSError:
return False
return any(magic_number.startswith(lowercase ) for cls_magic_number in cls.magic_numbers )
class UpperCAmelCase__ ( UpperCAmelCase_):
@classmethod
def __lowerCamelCase ( cls , lowercase , **lowercase ) -> bool:
return tarfile.is_tarfile(lowercase )
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> str:
def resolved(lowercase ) -> str:
return os.path.realpath(os.path.abspath(lowercase ) )
def badpath(lowercase , lowercase ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(lowercase , lowercase ) ).startswith(lowercase )
def badlink(lowercase , lowercase ) -> bool:
# Links are interpreted relative to the directory containing the link
__UpperCamelCase = resolved(os.path.join(lowercase , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=lowercase )
__UpperCamelCase = resolved(lowercase )
for finfo in members:
if badpath(finfo.name , lowercase ):
logger.error(f"Extraction of {finfo.name} is blocked (illegal path)" )
elif finfo.issym() and badlink(lowercase , lowercase ):
logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}" )
elif finfo.islnk() and badlink(lowercase , lowercase ):
logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}" )
else:
yield finfo
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
os.makedirs(lowercase , exist_ok=lowercase )
__UpperCamelCase = tarfile.open(lowercase )
tar_file.extractall(lowercase , members=TarExtractor.safemembers(lowercase , lowercase ) )
tar_file.close()
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''\x1F\x8B''']
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
with gzip.open(lowercase , """rb""" ) as gzip_file:
with open(lowercase , """wb""" ) as extracted_file:
shutil.copyfileobj(lowercase , lowercase )
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [
B'''PK\x03\x04''',
B'''PK\x05\x06''', # empty archive
B'''PK\x07\x08''', # spanned archive
]
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase = b"" ) -> bool:
if super().is_extractable(lowercase , magic_number=lowercase ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(lowercase , """rb""" ) as fp:
__UpperCamelCase = _EndRecData(lowercase )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
__UpperCamelCase = fp.read(lowercase ) # CD is where we expect it to be
if len(lowercase ) == sizeCentralDir:
__UpperCamelCase = struct.unpack(lowercase , lowercase ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
os.makedirs(lowercase , exist_ok=lowercase )
with zipfile.ZipFile(lowercase , """r""" ) as zip_file:
zip_file.extractall(lowercase )
zip_file.close()
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''\xFD\x37\x7A\x58\x5A\x00''']
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
with lzma.open(lowercase ) as compressed_file:
with open(lowercase , """wb""" ) as extracted_file:
shutil.copyfileobj(lowercase , lowercase )
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''Rar!\x1a\x07\x00''', B'''Rar!\x1a\x07\x01\x00'''] # RAR_ID # RAR5_ID
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(lowercase , exist_ok=lowercase )
__UpperCamelCase = rarfile.RarFile(lowercase )
rf.extractall(lowercase )
rf.close()
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''\x28\xb5\x2F\xFD''']
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
__UpperCamelCase = zstd.ZstdDecompressor()
with open(lowercase , """rb""" ) as ifh, open(lowercase , """wb""" ) as ofh:
dctx.copy_stream(lowercase , lowercase )
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''\x42\x5A\x68''']
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
with bza.open(lowercase , """rb""" ) as compressed_file:
with open(lowercase , """wb""" ) as extracted_file:
shutil.copyfileobj(lowercase , lowercase )
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''\x37\x7A\xBC\xAF\x27\x1C''']
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
import pyazr
os.makedirs(lowercase , exist_ok=lowercase )
with pyazr.SevenZipFile(lowercase , """r""" ) as archive:
archive.extractall(lowercase )
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = [B'''\x04\x22\x4D\x18''']
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> None:
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
import lza.frame
with lza.frame.open(lowercase , """rb""" ) as compressed_file:
with open(lowercase , """wb""" ) as extracted_file:
shutil.copyfileobj(lowercase , lowercase )
class UpperCAmelCase__ :
# Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
__SCREAMING_SNAKE_CASE = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def __lowerCamelCase ( cls ) -> Union[str, Any]:
return max(
len(lowercase )
for extractor in cls.extractors.values()
if issubclass(lowercase , lowercase )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def __lowerCamelCase ( lowercase , lowercase ) -> str:
try:
return MagicNumberBaseExtractor.read_magic_number(lowercase , magic_number_length=lowercase )
except OSError:
return b""
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase = False ) -> bool:
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" , category=lowercase , )
__UpperCamelCase = cls.infer_extractor_format(lowercase )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def __lowerCamelCase ( cls , lowercase ) -> str: # <Added version="2.4.0"/>
__UpperCamelCase = cls._get_magic_number_max_length()
__UpperCamelCase = cls._read_magic_number(lowercase , lowercase )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(lowercase , magic_number=lowercase ):
return extractor_format
@classmethod
def __lowerCamelCase ( cls , lowercase , lowercase , lowercase = None , lowercase = "deprecated" , ) -> None:
os.makedirs(os.path.dirname(lowercase ) , exist_ok=lowercase )
# Prevent parallel extractions
__UpperCamelCase = str(Path(lowercase ).with_suffix(""".lock""" ) )
with FileLock(lowercase ):
shutil.rmtree(lowercase , ignore_errors=lowercase )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(lowercase , lowercase ): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" , category=lowercase , )
__UpperCamelCase = extractor if extractor != """deprecated""" else extractor_format
else:
__UpperCamelCase = cls.extractors[extractor_format]
return extractor.extract(lowercase , lowercase )
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=lowercase , )
for extractor in cls.extractors.values():
if extractor.is_extractable(lowercase ):
return extractor.extract(lowercase , lowercase )
| 349 | 0 |
__A ='''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__A =[{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__A ={
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 364 |
from collections import defaultdict
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = first_str.lower().strip()
lowerCamelCase_ = second_str.lower().strip()
# Remove whitespace
lowerCamelCase_ = first_str.replace(" " , "" )
lowerCamelCase_ = second_str.replace(" " , "" )
# Strings of different lengths are not anagrams
if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
return False
# Default values for count should be 0
lowerCamelCase_ = defaultdict(lowerCamelCase__ )
# For each character in input strings,
# increment count in the corresponding
for i in range(len(lowerCamelCase__ ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
__A =input('''Enter the first string ''').strip()
__A =input('''Enter the second string ''').strip()
__A =check_anagrams(input_a, input_b)
print(F"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
| 47 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Optional[Any] = logging.get_logger(__name__)
__A : Any = {
'''facebook/timesformer''': '''https://huggingface.co/facebook/timesformer/resolve/main/config.json''',
}
class _UpperCAmelCase ( _A ):
SCREAMING_SNAKE_CASE_ : str = "timesformer"
def __init__( self : List[str] , A : List[Any]=2_24 , A : int=16 , A : int=3 , A : Tuple=8 , A : Optional[int]=7_68 , A : Any=12 , A : Optional[int]=12 , A : int=30_72 , A : Optional[Any]="gelu" , A : Optional[Any]=0.0 , A : Dict=0.0 , A : int=0.02 , A : Tuple=1e-6 , A : Optional[int]=True , A : str="divided_space_time" , A : List[Any]=0 , **A : Any , ) -> Tuple:
super().__init__(**__lowerCamelCase )
lowercase_ : Any = image_size
lowercase_ : Union[str, Any] = patch_size
lowercase_ : Tuple = num_channels
lowercase_ : Dict = num_frames
lowercase_ : int = hidden_size
lowercase_ : Union[str, Any] = num_hidden_layers
lowercase_ : List[Any] = num_attention_heads
lowercase_ : Optional[int] = intermediate_size
lowercase_ : Union[str, Any] = hidden_act
lowercase_ : List[Any] = hidden_dropout_prob
lowercase_ : Any = attention_probs_dropout_prob
lowercase_ : Tuple = initializer_range
lowercase_ : Tuple = layer_norm_eps
lowercase_ : str = qkv_bias
lowercase_ : str = attention_type
lowercase_ : Dict = drop_path_rate
| 33 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=3 , __lowerCamelCase=1_6 , __lowerCamelCase=[1, 2, 1] , __lowerCamelCase=[2, 2, 4] , __lowerCamelCase=2 , __lowerCamelCase=2.0 , __lowerCamelCase=True , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.1 , __lowerCamelCase="gelu" , __lowerCamelCase=False , __lowerCamelCase=True , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-5 , __lowerCamelCase=True , __lowerCamelCase=None , __lowerCamelCase=True , __lowerCamelCase=1_0 , __lowerCamelCase=8 , __lowerCamelCase=["stage1", "stage2", "stage3"] , __lowerCamelCase=[1, 2, 3] , ) -> Optional[Any]:
_A : int = parent
_A : Optional[Any] = batch_size
_A : str = image_size
_A : Tuple = patch_size
_A : Tuple = num_channels
_A : Optional[int] = embed_dim
_A : Dict = depths
_A : Any = num_heads
_A : Any = window_size
_A : int = mlp_ratio
_A : Any = qkv_bias
_A : Union[str, Any] = hidden_dropout_prob
_A : Optional[Any] = attention_probs_dropout_prob
_A : Dict = drop_path_rate
_A : List[Any] = hidden_act
_A : Any = use_absolute_embeddings
_A : Optional[int] = patch_norm
_A : Tuple = layer_norm_eps
_A : List[str] = initializer_range
_A : Optional[int] = is_training
_A : Optional[Any] = scope
_A : Optional[int] = use_labels
_A : Dict = type_sequence_label_size
_A : str = encoder_stride
_A : Optional[int] = out_features
_A : Optional[int] = out_indices
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_A : Optional[Any] = None
if self.use_labels:
_A : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_A : Optional[int] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self) -> Union[str, Any]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> List[Any]:
_A : Dict = MaskFormerSwinModel(config=__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : int = model(__lowerCamelCase)
_A : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
_A : List[str] = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Dict:
_A : Optional[Any] = MaskFormerSwinBackbone(config=__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : Dict = model(__lowerCamelCase)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [1_3, 1_6, 1_6, 1_6])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4])
# verify ValueError
with self.parent.assertRaises(__lowerCamelCase):
_A : Union[str, Any] = ["stem"]
_A : Union[str, Any] = MaskFormerSwinBackbone(config=__lowerCamelCase)
def _lowerCamelCase ( self) -> Dict:
_A : Any = self.prepare_config_and_inputs()
_A , _A , _A : List[Any] = config_and_inputs
_A : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( a , a , unittest.TestCase):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def _lowerCamelCase ( self) -> str:
_A : Union[str, Any] = MaskFormerSwinModelTester(self)
_A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , embed_dim=3_7)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
))
def _lowerCamelCase ( self) -> Union[str, Any]:
pass
def _lowerCamelCase ( self) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self) -> str:
return
def _lowerCamelCase ( self) -> List[Any]:
_A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase)
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCamelCase)
@unittest.skip("Swin does not use inputs_embeds")
def _lowerCamelCase ( self) -> str:
pass
@unittest.skip("Swin does not support feedforward chunking")
def _lowerCamelCase ( self) -> List[Any]:
pass
def _lowerCamelCase ( self) -> Optional[int]:
_A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Union[str, Any] = model_class(__lowerCamelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_A : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear))
def _lowerCamelCase ( self) -> Any:
_A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : int = model_class(__lowerCamelCase)
_A : Optional[int] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : int = [*signature.parameters.keys()]
_A : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase)
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
def _lowerCamelCase ( self) -> Tuple:
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
def _lowerCamelCase ( self) -> str:
pass
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]:
_A : Any = model_class(__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
with torch.no_grad():
_A : str = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase))
_A : Tuple = outputs.hidden_states
_A : Any = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths) + 1)
self.assertEqual(len(__lowerCamelCase) , __lowerCamelCase)
# Swin has a different seq_length
_A : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_A : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def _lowerCamelCase ( self) -> Dict:
_A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_A : List[Any] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Optional[int] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
def _lowerCamelCase ( self) -> Tuple:
_A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Optional[int] = 3
_A : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
_A : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_A : int = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_A : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_A : List[Any] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Union[str, Any] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width))
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
def _lowerCamelCase ( self) -> List[str]:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
def _lowerCamelCase ( self) -> List[str]:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
def _lowerCamelCase ( self) -> str:
pass
def _lowerCamelCase ( self) -> Optional[Any]:
_A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(__lowerCamelCase):
_A : Optional[int] = 0
return t
def check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase={}):
with torch.no_grad():
_A : Any = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase)
_A : int = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase).to_tuple()
def recursive_check(__lowerCamelCase , __lowerCamelCase):
if isinstance(__lowerCamelCase , (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(__lowerCamelCase , __lowerCamelCase):
recursive_check(__lowerCamelCase , __lowerCamelCase)
elif isinstance(__lowerCamelCase , __lowerCamelCase):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values()):
recursive_check(__lowerCamelCase , __lowerCamelCase)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(__lowerCamelCase) , set_nan_tensor_to_zero(__lowerCamelCase) , atol=1e-5) , msg=(
"Tuple and dict output are not equal. Difference:"
F" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
F" {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}. Dict has"
F" `nan`: {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}."
) , )
recursive_check(__lowerCamelCase , __lowerCamelCase)
for model_class in self.all_model_classes:
_A : List[Any] = model_class(__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
_A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
_A : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
_A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
_A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
_A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True})
_A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
_A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True})
@require_torch
class lowerCAmelCase__ ( unittest.TestCase , a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (MaskFormerSwinBackbone,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE = MaskFormerSwinConfig
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Tuple = MaskFormerSwinModelTester(self)
def _lowerCamelCase ( self) -> Optional[Any]:
_A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Union[str, Any] = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
_A : Optional[Any] = backbone_class(__lowerCamelCase)
backbone.to(__lowerCamelCase)
backbone.eval()
_A : List[Any] = backbone(**__lowerCamelCase)
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , __lowerCamelCase)
self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels))
self.assertIsNone(outputs.hidden_states)
self.assertIsNone(outputs.attentions)
# Test output_hidden_states=True
_A : List[str] = backbone(**__lowerCamelCase , output_hidden_states=__lowerCamelCase)
self.assertIsNotNone(outputs.hidden_states)
self.assertTrue(len(outputs.hidden_states) , len(backbone.stage_names))
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
_A , _A , _A : List[str] = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels))
# Test output_attentions=True
if self.has_attentions:
_A : int = backbone(**__lowerCamelCase , output_attentions=__lowerCamelCase)
self.assertIsNotNone(outputs.attentions)
| 11 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
def __init__( self : Dict , A : UNetaDModel , A : KarrasVeScheduler ):
super().__init__()
self.register_modules(unet=A , scheduler=A )
@torch.no_grad()
def __call__( self : Union[str, Any] , A : int = 1 , A : int = 50 , A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A : Optional[str] = "pil" , A : bool = True , **A : Tuple , ):
__snake_case: List[Any] = self.unet.config.sample_size
__snake_case: Dict = (batch_size, 3, img_size, img_size)
__snake_case: List[Any] = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
__snake_case: List[str] = randn_tensor(A , generator=A , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(A )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
__snake_case: Dict = self.scheduler.schedule[t]
__snake_case: str = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
__snake_case: Any = self.scheduler.add_noise_to_input(A , A , generator=A )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
__snake_case: int = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
__snake_case: Any = self.scheduler.step(A , A , A , A )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
__snake_case: List[str] = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
__snake_case: str = self.scheduler.step_correct(
A , A , A , A , step_output.prev_sample , step_output["""derivative"""] , )
__snake_case: Any = step_output.prev_sample
__snake_case: Union[str, Any] = (sample / 2 + 0.5).clamp(0 , 1 )
__snake_case: str = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__snake_case: List[Any] = self.numpy_to_pil(A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A )
| 356 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__UpperCAmelCase : Tuple = {
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def A__ ( SCREAMING_SNAKE_CASE__) -> Union[str, Any]:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts)
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config)
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights)
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> str:
if args.student_type == "roberta":
__snake_case: Optional[Any] = False
elif args.student_type == "gpt2":
__snake_case: str = False
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> List[str]:
if args.student_type == "roberta":
__snake_case: Optional[int] = False
def A__ ( ) -> Tuple:
__snake_case: Optional[int] = argparse.ArgumentParser(description="""Training""")
parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""")
parser.add_argument(
"""--dump_path""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The output directory (log, checkpoints, parameters, etc.)""")
parser.add_argument(
"""--data_file""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , )
parser.add_argument(
"""--student_type""" , type=SCREAMING_SNAKE_CASE__ , choices=["""distilbert""", """roberta""", """gpt2"""] , required=SCREAMING_SNAKE_CASE__ , help="""The student type (DistilBERT, RoBERTa).""" , )
parser.add_argument("""--student_config""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""Path to the student configuration.""")
parser.add_argument(
"""--student_pretrained_weights""" , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help="""Load student initialization checkpoint.""")
parser.add_argument(
"""--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=SCREAMING_SNAKE_CASE__ , help="""Teacher type (BERT, RoBERTa).""")
parser.add_argument("""--teacher_name""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The teacher model.""")
parser.add_argument("""--temperature""" , default=2.0 , type=SCREAMING_SNAKE_CASE__ , help="""Temperature for the softmax temperature.""")
parser.add_argument(
"""--alpha_ce""" , default=0.5 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight for the distillation loss. Must be >=0.""")
parser.add_argument(
"""--alpha_mlm""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , )
parser.add_argument("""--alpha_clm""" , default=0.5 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight for the CLM loss. Must be >=0.""")
parser.add_argument("""--alpha_mse""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight of the MSE loss. Must be >=0.""")
parser.add_argument(
"""--alpha_cos""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight of the cosine embedding loss. Must be >=0.""")
parser.add_argument(
"""--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""")
parser.add_argument(
"""--mlm_mask_prop""" , default=0.15 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens for which we need to make a prediction.""" , )
parser.add_argument("""--word_mask""" , default=0.8 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens to mask out.""")
parser.add_argument("""--word_keep""" , default=0.1 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens to keep.""")
parser.add_argument("""--word_rand""" , default=0.1 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens to randomly replace.""")
parser.add_argument(
"""--mlm_smoothing""" , default=0.7 , type=SCREAMING_SNAKE_CASE__ , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , )
parser.add_argument("""--token_counts""" , type=SCREAMING_SNAKE_CASE__ , help="""The token counts in the data_file for MLM.""")
parser.add_argument(
"""--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , )
parser.add_argument(
"""--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , )
parser.add_argument(
"""--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , )
parser.add_argument("""--n_epoch""" , type=SCREAMING_SNAKE_CASE__ , default=3 , help="""Number of pass on the whole dataset.""")
parser.add_argument("""--batch_size""" , type=SCREAMING_SNAKE_CASE__ , default=5 , help="""Batch size (for each process).""")
parser.add_argument(
"""--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. Default is true.""" , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=SCREAMING_SNAKE_CASE__ , default=50 , help="""Gradient accumulation for larger training batches.""" , )
parser.add_argument("""--warmup_prop""" , default=0.05 , type=SCREAMING_SNAKE_CASE__ , help="""Linear warmup proportion.""")
parser.add_argument("""--weight_decay""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Weight decay if we apply some.""")
parser.add_argument("""--learning_rate""" , default=5e-4 , type=SCREAMING_SNAKE_CASE__ , help="""The initial learning rate for Adam.""")
parser.add_argument("""--adam_epsilon""" , default=1e-6 , type=SCREAMING_SNAKE_CASE__ , help="""Epsilon for Adam optimizer.""")
parser.add_argument("""--max_grad_norm""" , default=5.0 , type=SCREAMING_SNAKE_CASE__ , help="""Max gradient norm.""")
parser.add_argument("""--initializer_range""" , default=0.02 , type=SCREAMING_SNAKE_CASE__ , help="""Random initialization range.""")
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=SCREAMING_SNAKE_CASE__ , default="""O1""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_gpu""" , type=SCREAMING_SNAKE_CASE__ , default=1 , help="""Number of GPUs in the node.""")
parser.add_argument("""--local_rank""" , type=SCREAMING_SNAKE_CASE__ , default=-1 , help="""Distributed training - Local rank""")
parser.add_argument("""--seed""" , type=SCREAMING_SNAKE_CASE__ , default=56 , help="""Random seed""")
parser.add_argument("""--log_interval""" , type=SCREAMING_SNAKE_CASE__ , default=500 , help="""Tensorboard logging interval.""")
parser.add_argument("""--checkpoint_interval""" , type=SCREAMING_SNAKE_CASE__ , default=4000 , help="""Checkpoint interval.""")
__snake_case: List[Any] = parser.parse_args()
sanity_checks(SCREAMING_SNAKE_CASE__)
# ARGS #
init_gpu_params(SCREAMING_SNAKE_CASE__)
set_seed(SCREAMING_SNAKE_CASE__)
if args.is_master:
if os.path.exists(args.dump_path):
if not args.force:
raise ValueError(
F'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite'''
""" itUse `--force` if you want to overwrite it""")
else:
shutil.rmtree(args.dump_path)
if not os.path.exists(args.dump_path):
os.makedirs(args.dump_path)
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''')
# SAVE PARAMS #
logger.info(F'''Param: {args}''')
with open(os.path.join(args.dump_path , """parameters.json""") , """w""") as f:
json.dump(vars(SCREAMING_SNAKE_CASE__) , SCREAMING_SNAKE_CASE__ , indent=4)
git_log(args.dump_path)
__snake_case , __snake_case , __snake_case: str = MODEL_CLASSES[args.student_type]
__snake_case , __snake_case , __snake_case: Union[str, Any] = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__snake_case: Tuple = teacher_tokenizer_class.from_pretrained(args.teacher_name)
__snake_case: str = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__snake_case: List[str] = tokenizer.all_special_tokens.index(SCREAMING_SNAKE_CASE__)
__snake_case: Optional[Any] = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''')
__snake_case: Optional[Any] = special_tok_ids
__snake_case: List[Any] = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file , """rb""") as fp:
__snake_case: int = pickle.load(SCREAMING_SNAKE_CASE__)
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''')
with open(args.token_counts , """rb""") as fp:
__snake_case: List[str] = pickle.load(SCREAMING_SNAKE_CASE__)
__snake_case: Dict = np.maximum(SCREAMING_SNAKE_CASE__ , 1) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__snake_case: Union[str, Any] = 0.0 # do not predict special tokens
__snake_case: Any = torch.from_numpy(SCREAMING_SNAKE_CASE__)
else:
__snake_case: Any = None
__snake_case: Union[str, Any] = LmSeqsDataset(params=SCREAMING_SNAKE_CASE__ , data=SCREAMING_SNAKE_CASE__)
logger.info("""Data loader created.""")
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''')
__snake_case: Tuple = student_config_class.from_pretrained(args.student_config)
__snake_case: List[str] = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''')
__snake_case: Optional[int] = student_model_class.from_pretrained(args.student_pretrained_weights , config=SCREAMING_SNAKE_CASE__)
else:
__snake_case: Union[str, Any] = student_model_class(SCREAMING_SNAKE_CASE__)
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''')
logger.info("""Student loaded.""")
# TEACHER #
__snake_case: Optional[int] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=SCREAMING_SNAKE_CASE__)
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''')
logger.info(F'''Teacher loaded from {args.teacher_name}.''')
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
if args.freeze_token_type_embds:
freeze_token_type_embeddings(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__snake_case: List[str] = Distiller(
params=SCREAMING_SNAKE_CASE__ , dataset=SCREAMING_SNAKE_CASE__ , token_probs=SCREAMING_SNAKE_CASE__ , student=SCREAMING_SNAKE_CASE__ , teacher=SCREAMING_SNAKE_CASE__)
distiller.train()
logger.info("""Let's go get some drinks.""")
if __name__ == "__main__":
main()
| 293 | 0 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
__A : Tuple = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
__A : Tuple = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowerCAmelCase : Dict = numpy.dtype(numpy.uintaa ).newbyteorder('>' )
return numpy.frombuffer(bytestream.read(4 ), dtype=_UpperCAmelCase )[0]
@deprecated(_UpperCAmelCase, 'Please use tf.data to implement this functionality.' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> int:
'''simple docstring'''
print('Extracting', f.name )
with gzip.GzipFile(fileobj=_UpperCAmelCase ) as bytestream:
lowerCAmelCase : List[str] = _readaa(_UpperCAmelCase )
if magic != 2_051:
raise ValueError(
'Invalid magic number %d in MNIST image file: %s' % (magic, f.name) )
lowerCAmelCase : Optional[Any] = _readaa(_UpperCAmelCase )
lowerCAmelCase : Any = _readaa(_UpperCAmelCase )
lowerCAmelCase : List[Any] = _readaa(_UpperCAmelCase )
lowerCAmelCase : Union[str, Any] = bytestream.read(rows * cols * num_images )
lowerCAmelCase : Any = numpy.frombuffer(_UpperCAmelCase, dtype=numpy.uinta )
lowerCAmelCase : Optional[int] = data.reshape(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, 1 )
return data
@deprecated(_UpperCAmelCase, 'Please use tf.one_hot on tensors.' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> Any:
'''simple docstring'''
lowerCAmelCase : Optional[Any] = labels_dense.shape[0]
lowerCAmelCase : Union[str, Any] = numpy.arange(_UpperCAmelCase ) * num_classes
lowerCAmelCase : List[str] = numpy.zeros((num_labels, num_classes) )
lowerCAmelCase : List[str] = 1
return labels_one_hot
@deprecated(_UpperCAmelCase, 'Please use tf.data to implement this functionality.' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase=False, _UpperCAmelCase=10 ) -> List[str]:
'''simple docstring'''
print('Extracting', f.name )
with gzip.GzipFile(fileobj=_UpperCAmelCase ) as bytestream:
lowerCAmelCase : List[str] = _readaa(_UpperCAmelCase )
if magic != 2_049:
raise ValueError(
'Invalid magic number %d in MNIST label file: %s' % (magic, f.name) )
lowerCAmelCase : Optional[Any] = _readaa(_UpperCAmelCase )
lowerCAmelCase : Dict = bytestream.read(_UpperCAmelCase )
lowerCAmelCase : Dict = numpy.frombuffer(_UpperCAmelCase, dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(_UpperCAmelCase, _UpperCAmelCase )
return labels
class __A :
@deprecated(
UpperCAmelCase_ , 'Please use alternatives such as official/mnist/_DataSet.py'
' from tensorflow/models.' , )
def __init__( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str=False , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[str]=dtypes.floataa , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Optional[Any]=None , ):
lowerCAmelCase , lowerCAmelCase : int = random_seed.get_seed(UpperCAmelCase_ )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
lowerCAmelCase : List[str] = dtypes.as_dtype(UpperCAmelCase_ ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype )
if fake_data:
lowerCAmelCase : Dict = 10000
lowerCAmelCase : Any = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f"images.shape: {images.shape} labels.shape: {labels.shape}"
lowerCAmelCase : Optional[Any] = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
lowerCAmelCase : Union[str, Any] = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
lowerCAmelCase : Optional[int] = images.astype(numpy.floataa )
lowerCAmelCase : Dict = numpy.multiply(UpperCAmelCase_ , 1.0 / 2_55.0 )
lowerCAmelCase : List[str] = images
lowerCAmelCase : List[str] = labels
lowerCAmelCase : List[Any] = 0
lowerCAmelCase : Optional[int] = 0
@property
def lowercase__ ( self : str ):
return self._images
@property
def lowercase__ ( self : Dict ):
return self._labels
@property
def lowercase__ ( self : List[Any] ):
return self._num_examples
@property
def lowercase__ ( self : Any ):
return self._epochs_completed
def lowercase__ ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : List[str]=True ):
if fake_data:
lowerCAmelCase : Union[str, Any] = [1] * 784
lowerCAmelCase : Dict = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(UpperCAmelCase_ )],
[fake_label for _ in range(UpperCAmelCase_ )],
)
lowerCAmelCase : Union[str, Any] = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
lowerCAmelCase : Optional[int] = numpy.arange(self._num_examples )
numpy.random.shuffle(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = self.images[perma]
lowerCAmelCase : str = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
lowerCAmelCase : Tuple = self._num_examples - start
lowerCAmelCase : Union[str, Any] = self._images[start : self._num_examples]
lowerCAmelCase : Tuple = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
lowerCAmelCase : Dict = numpy.arange(self._num_examples )
numpy.random.shuffle(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = self.images[perm]
lowerCAmelCase : Optional[Any] = self.labels[perm]
# Start next epoch
lowerCAmelCase : Optional[Any] = 0
lowerCAmelCase : Dict = batch_size - rest_num_examples
lowerCAmelCase : int = self._index_in_epoch
lowerCAmelCase : Union[str, Any] = self._images[start:end]
lowerCAmelCase : int = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
lowerCAmelCase : Optional[Any] = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(_UpperCAmelCase, 'Please write your own downloading logic.' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Any:
'''simple docstring'''
if not gfile.Exists(_UpperCAmelCase ):
gfile.MakeDirs(_UpperCAmelCase )
lowerCAmelCase : Union[str, Any] = os.path.join(_UpperCAmelCase, _UpperCAmelCase )
if not gfile.Exists(_UpperCAmelCase ):
urllib.request.urlretrieve(_UpperCAmelCase, _UpperCAmelCase ) # noqa: S310
with gfile.GFile(_UpperCAmelCase ) as f:
lowerCAmelCase : List[Any] = f.size()
print('Successfully downloaded', _UpperCAmelCase, _UpperCAmelCase, 'bytes.' )
return filepath
@deprecated(
_UpperCAmelCase, 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=dtypes.floataa, _UpperCAmelCase=True, _UpperCAmelCase=5_000, _UpperCAmelCase=None, _UpperCAmelCase=DEFAULT_SOURCE_URL, ) -> Tuple:
'''simple docstring'''
if fake_data:
def fake():
return _DataSet(
[], [], fake_data=_UpperCAmelCase, one_hot=_UpperCAmelCase, dtype=_UpperCAmelCase, seed=_UpperCAmelCase )
lowerCAmelCase : Tuple = fake()
lowerCAmelCase : Optional[Any] = fake()
lowerCAmelCase : List[Any] = fake()
return _Datasets(train=_UpperCAmelCase, validation=_UpperCAmelCase, test=_UpperCAmelCase )
if not source_url: # empty string check
lowerCAmelCase : Any = DEFAULT_SOURCE_URL
lowerCAmelCase : Optional[Any] = 'train-images-idx3-ubyte.gz'
lowerCAmelCase : Any = 'train-labels-idx1-ubyte.gz'
lowerCAmelCase : int = 't10k-images-idx3-ubyte.gz'
lowerCAmelCase : Union[str, Any] = 't10k-labels-idx1-ubyte.gz'
lowerCAmelCase : str = _maybe_download(
_UpperCAmelCase, _UpperCAmelCase, source_url + train_images_file )
with gfile.Open(_UpperCAmelCase, 'rb' ) as f:
lowerCAmelCase : Any = _extract_images(_UpperCAmelCase )
lowerCAmelCase : Tuple = _maybe_download(
_UpperCAmelCase, _UpperCAmelCase, source_url + train_labels_file )
with gfile.Open(_UpperCAmelCase, 'rb' ) as f:
lowerCAmelCase : int = _extract_labels(_UpperCAmelCase, one_hot=_UpperCAmelCase )
lowerCAmelCase : Optional[Any] = _maybe_download(
_UpperCAmelCase, _UpperCAmelCase, source_url + test_images_file )
with gfile.Open(_UpperCAmelCase, 'rb' ) as f:
lowerCAmelCase : List[Any] = _extract_images(_UpperCAmelCase )
lowerCAmelCase : Any = _maybe_download(
_UpperCAmelCase, _UpperCAmelCase, source_url + test_labels_file )
with gfile.Open(_UpperCAmelCase, 'rb' ) as f:
lowerCAmelCase : List[str] = _extract_labels(_UpperCAmelCase, one_hot=_UpperCAmelCase )
if not 0 <= validation_size <= len(_UpperCAmelCase ):
lowerCAmelCase : str = (
'Validation size should be between 0 and '
f"{len(_UpperCAmelCase )}. Received: {validation_size}."
)
raise ValueError(_UpperCAmelCase )
lowerCAmelCase : str = train_images[:validation_size]
lowerCAmelCase : Dict = train_labels[:validation_size]
lowerCAmelCase : List[str] = train_images[validation_size:]
lowerCAmelCase : str = train_labels[validation_size:]
lowerCAmelCase : str = {'dtype': dtype, 'reshape': reshape, 'seed': seed}
lowerCAmelCase : int = _DataSet(_UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase )
lowerCAmelCase : Union[str, Any] = _DataSet(_UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase )
lowerCAmelCase : Union[str, Any] = _DataSet(_UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase )
return _Datasets(train=_UpperCAmelCase, validation=_UpperCAmelCase, test=_UpperCAmelCase )
| 138 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
__A : Any = TypeVar('''T''')
class __A ( Generic[T] ):
def __init__( self : Dict , UpperCAmelCase_ : list[T] , UpperCAmelCase_ : Callable[[T, T], T] ):
lowerCAmelCase : Any | T = None
lowerCAmelCase : int = len(UpperCAmelCase_ )
lowerCAmelCase : list[T] = [any_type for _ in range(self.N )] + arr
lowerCAmelCase : List[Any] = fnc
self.build()
def lowercase__ ( self : str ):
for p in range(self.N - 1 , 0 , -1 ):
lowerCAmelCase : Optional[Any] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def lowercase__ ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : T ):
p += self.N
lowerCAmelCase : int = v
while p > 1:
lowerCAmelCase : List[Any] = p // 2
lowerCAmelCase : List[Any] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def lowercase__ ( self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ): # noqa: E741
lowerCAmelCase , lowerCAmelCase : str = l + self.N, r + self.N
lowerCAmelCase : T | None = None
while l <= r:
if l % 2 == 1:
lowerCAmelCase : Any = self.st[l] if res is None else self.fn(UpperCAmelCase_ , self.st[l] )
if r % 2 == 0:
lowerCAmelCase : Optional[int] = self.st[r] if res is None else self.fn(UpperCAmelCase_ , self.st[r] )
lowerCAmelCase , lowerCAmelCase : Optional[Any] = (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
__A : str = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
__A : List[Any] = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
__A : Optional[int] = SegmentTree(test_array, min)
__A : Optional[int] = SegmentTree(test_array, max)
__A : Dict = SegmentTree(test_array, lambda a, b: a + b)
def SCREAMING_SNAKE_CASE__ ( ) -> None:
'''simple docstring'''
for i in range(len(_UpperCAmelCase ) ):
for j in range(_UpperCAmelCase, len(_UpperCAmelCase ) ):
lowerCAmelCase : str = reduce(_UpperCAmelCase, test_array[i : j + 1] )
lowerCAmelCase : Dict = reduce(_UpperCAmelCase, test_array[i : j + 1] )
lowerCAmelCase : str = reduce(lambda _UpperCAmelCase, _UpperCAmelCase : a + b, test_array[i : j + 1] )
assert min_range == min_segment_tree.query(_UpperCAmelCase, _UpperCAmelCase )
assert max_range == max_segment_tree.query(_UpperCAmelCase, _UpperCAmelCase )
assert sum_range == sum_segment_tree.query(_UpperCAmelCase, _UpperCAmelCase )
test_all_segments()
for index, value in test_updates.items():
__A : int = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 138 | 1 |
def UpperCamelCase ( _A : int )-> int:
"""simple docstring"""
A__ = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def UpperCamelCase ( _A : int = 100 )-> int:
"""simple docstring"""
A__ = 1
A__ = 2
for i in range(2 , max_n + 1 ):
A__ = pre_numerator
A__ = 2 * i // 3 if i % 3 == 0 else 1
A__ = cur_numerator
A__ = e_cont * pre_numerator + temp
return sum_digits(_A )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 362 |
from manim import *
class UpperCamelCase ( _UpperCAmelCase ):
def __A ( self ):
A__ = Rectangle(height=0.5 , width=0.5 )
A__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
A__ = [mem.copy() for i in range(6 )]
A__ = [mem.copy() for i in range(6 )]
A__ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
A__ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
A__ = VGroup(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
A__ = Text("CPU" , font_size=24 )
A__ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase__ )
A__ = [mem.copy() for i in range(4 )]
A__ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
A__ = Text("GPU" , font_size=24 )
A__ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(UpperCAmelCase__ )
A__ = [mem.copy() for i in range(6 )]
A__ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
A__ = Text("Model" , font_size=24 )
A__ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ )
model.move_to([3, -1.0, 0] )
self.add(UpperCAmelCase__ )
A__ = []
for i, rect in enumerate(UpperCAmelCase__ ):
rect.set_stroke(UpperCAmelCase__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
A__ = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCAmelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=UpperCAmelCase__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=UpperCAmelCase__ , buff=0.0 )
self.add(UpperCAmelCase__ )
cpu_targs.append(UpperCAmelCase__ )
A__ = [mem.copy() for i in range(6 )]
A__ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
A__ = Text("Loaded Checkpoint" , font_size=24 )
A__ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , aligned_edge=UpperCAmelCase__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
A__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
A__ = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCAmelCase__ , UpperCAmelCase__ )
A__ = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(UpperCAmelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
A__ = MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase__ ) , Write(UpperCAmelCase__ ) )
self.play(Write(UpperCAmelCase__ , run_time=1 ) , Create(UpperCAmelCase__ , run_time=1 ) )
A__ = []
A__ = []
for i, rect in enumerate(UpperCAmelCase__ ):
A__ = fill.copy().set_fill(UpperCAmelCase__ , opacity=0.7 )
target.move_to(UpperCAmelCase__ )
first_animations.append(GrowFromCenter(UpperCAmelCase__ , run_time=1 ) )
A__ = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(UpperCAmelCase__ , run_time=1.5 ) )
self.play(*UpperCAmelCase__ )
self.play(*UpperCAmelCase__ )
self.wait()
| 198 | 0 |
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
__UpperCAmelCase = TypeVar('T')
class _SCREAMING_SNAKE_CASE ( Generic[T] ):
UpperCAmelCase_ :deque[T] # Cache store of keys
UpperCAmelCase_ :set[T] # References of the keys in cache
UpperCAmelCase_ :int = 10 # Maximum capacity of cache
def __init__( self , __A ) -> None:
lowerCAmelCase_ :Tuple = deque()
lowerCAmelCase_ :Any = set()
if not n:
lowerCAmelCase_ :str = sys.maxsize
elif n < 0:
raise ValueError("""n should be an integer greater than 0.""" )
else:
lowerCAmelCase_ :Any = n
def __lowerCAmelCase ( self , __A ) -> None:
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
lowerCAmelCase_ :int = self.dq_store.pop()
self.key_reference.remove(__A )
else:
self.dq_store.remove(__A )
self.dq_store.appendleft(__A )
self.key_reference.add(__A )
def __lowerCAmelCase ( self ) -> None:
for k in self.dq_store:
print(__A )
def __repr__( self ) -> str:
return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 84 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : Path , lowerCAmelCase : str = None , lowerCAmelCase : str = None , lowerCAmelCase : str = None , ):
"""simple docstring"""
if config_name_or_path is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
if generator_tokenizer_name_or_path is None:
SCREAMING_SNAKE_CASE_ : Dict = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = question_encoder_name_or_path
SCREAMING_SNAKE_CASE_ : Union[str, Any] = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
# Save model.
SCREAMING_SNAKE_CASE_ : List[Any] = RagConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = AutoConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = AutoConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = gen_config
SCREAMING_SNAKE_CASE_ : Optional[Any] = question_encoder_config
SCREAMING_SNAKE_CASE_ : Dict = model_class.from_pretrained_question_encoder_generator(
lowerCAmelCase , lowerCAmelCase , config=lowerCAmelCase )
rag_model.save_pretrained(lowerCAmelCase )
# Sanity check.
model_class.from_pretrained(lowerCAmelCase )
# Save tokenizers.
SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase )
gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCAmelCase )
question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
if __name__ == "__main__":
__lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
__lowerCamelCase : str = parser.parse_args()
__lowerCamelCase : int = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 18 | 0 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __A :
'''simple docstring'''
def __init__( self : Dict ,_snake_case : Optional[Any] ,_snake_case : Optional[Any]=13 ,_snake_case : List[str]=7 ,_snake_case : List[Any]=True ,_snake_case : Optional[Any]=True ,_snake_case : Any=False ,_snake_case : str=True ,_snake_case : Optional[Any]=99 ,_snake_case : Optional[int]=32 ,_snake_case : str=5 ,_snake_case : Optional[int]=4 ,_snake_case : Dict=37 ,_snake_case : str="gelu" ,_snake_case : str=0.1 ,_snake_case : Any=0.1 ,_snake_case : Any=512 ,_snake_case : Dict=16 ,_snake_case : List[str]=2 ,_snake_case : Any=0.02 ,_snake_case : List[str]=3 ,_snake_case : str=4 ,_snake_case : Dict=None ,) -> Dict:
"""simple docstring"""
lowercase__ : Union[str, Any] = parent
lowercase__ : List[str] = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : int = is_training
lowercase__ : List[Any] = use_input_mask
lowercase__ : List[Any] = use_token_type_ids
lowercase__ : Optional[Any] = use_labels
lowercase__ : str = vocab_size
lowercase__ : Optional[int] = hidden_size
lowercase__ : int = num_hidden_layers
lowercase__ : Union[str, Any] = num_attention_heads
lowercase__ : Tuple = intermediate_size
lowercase__ : Optional[int] = hidden_act
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : Tuple = attention_probs_dropout_prob
lowercase__ : Tuple = max_position_embeddings
lowercase__ : int = type_vocab_size
lowercase__ : int = type_sequence_label_size
lowercase__ : List[str] = initializer_range
lowercase__ : Any = num_labels
lowercase__ : List[str] = num_choices
lowercase__ : int = scope
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase__ : Tuple = None
if self.use_input_mask:
lowercase__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : List[Any] = None
if self.use_token_type_ids:
lowercase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
lowercase__ : Optional[Any] = None
lowercase__ : Optional[int] = None
lowercase__ : List[str] = None
if self.use_labels:
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
lowercase__ : List[str] = ids_tensor([self.batch_size] ,self.num_choices )
lowercase__ : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_snake_case ,initializer_range=self.initializer_range ,use_stable_embedding=_snake_case ,)
def UpperCAmelCase ( self : List[str] ,_snake_case : str ,_snake_case : Optional[Any] ,_snake_case : Any ,_snake_case : Dict ,_snake_case : List[Any] ,_snake_case : Any ,_snake_case : Dict ) -> int:
"""simple docstring"""
lowercase__ : List[Any] = OpenLlamaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : int = model(_snake_case ,attention_mask=_snake_case )
lowercase__ : List[Any] = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self : int ,_snake_case : Any ,_snake_case : Tuple ,_snake_case : Any ,_snake_case : Optional[Any] ,_snake_case : int ,_snake_case : str ,_snake_case : Optional[int] ,_snake_case : List[str] ,_snake_case : Any ,) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = True
lowercase__ : str = OpenLlamaModel(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Union[str, Any] = model(
_snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,)
lowercase__ : int = model(
_snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,)
lowercase__ : str = model(_snake_case ,attention_mask=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self : Dict ,_snake_case : List[Any] ,_snake_case : Optional[int] ,_snake_case : List[str] ,_snake_case : Any ,_snake_case : Optional[int] ,_snake_case : str ,_snake_case : str ,_snake_case : Optional[Any] ,_snake_case : List[Any] ,) -> List[Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = OpenLlamaForCausalLM(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Tuple = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self : Optional[int] ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : List[str] ,_snake_case : Union[str, Any] ,_snake_case : int ,_snake_case : Tuple ,_snake_case : Dict ,_snake_case : Dict ,_snake_case : List[Any] ,) -> List[str]:
"""simple docstring"""
lowercase__ : Optional[int] = True
lowercase__ : int = True
lowercase__ : List[Any] = OpenLlamaForCausalLM(config=_snake_case )
model.to(_snake_case )
model.eval()
# first forward pass
lowercase__ : List[str] = model(
_snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,use_cache=_snake_case ,)
lowercase__ : Optional[int] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase__ : List[Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
lowercase__ : int = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
lowercase__ : str = torch.cat([input_ids, next_tokens] ,dim=-1 )
lowercase__ : Union[str, Any] = torch.cat([input_mask, next_mask] ,dim=-1 )
lowercase__ : Tuple = model(
_snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,output_hidden_states=_snake_case ,)['''hidden_states'''][0]
lowercase__ : Any = model(
_snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,past_key_values=_snake_case ,output_hidden_states=_snake_case ,)['''hidden_states'''][0]
# select random slice
lowercase__ : Optional[int] = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
lowercase__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase__ : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_snake_case ,_snake_case ,atol=1e-3 ) )
def UpperCAmelCase ( self : Any ) -> str:
"""simple docstring"""
lowercase__ : str = self.prepare_config_and_inputs()
(
(
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) ,
) : Optional[int] = config_and_inputs
lowercase__ : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __A ( A_ ,A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Tuple = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
lowerCAmelCase : Optional[Any] = (OpenLlamaForCausalLM,) if is_torch_available() else ()
lowerCAmelCase : Union[str, Any] = (
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Optional[int] = False
def UpperCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Union[str, Any] = OpenLlamaModelTester(self )
lowercase__ : Tuple = ConfigTester(self ,config_class=_snake_case ,hidden_size=37 )
def UpperCAmelCase ( self : Any ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase__ : Tuple = type
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = 3
lowercase__ : int = input_dict['''input_ids''']
lowercase__ : Dict = input_ids.ne(1 ).to(_snake_case )
lowercase__ : Optional[int] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
lowercase__ : Union[str, Any] = OpenLlamaForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Optional[Any] = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Optional[Any] = 3
lowercase__ : Any = '''single_label_classification'''
lowercase__ : Dict = input_dict['''input_ids''']
lowercase__ : Optional[int] = input_ids.ne(1 ).to(_snake_case )
lowercase__ : str = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
lowercase__ : Any = OpenLlamaForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Any = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Tuple = 3
lowercase__ : Optional[int] = '''multi_label_classification'''
lowercase__ : Dict = input_dict['''input_ids''']
lowercase__ : str = input_ids.ne(1 ).to(_snake_case )
lowercase__ : Optional[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
lowercase__ : Optional[Any] = OpenLlamaForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Dict = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
def UpperCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = ids_tensor([1, 10] ,config.vocab_size )
lowercase__ : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase__ : Optional[int] = OpenLlamaModel(_snake_case )
original_model.to(_snake_case )
original_model.eval()
lowercase__ : Union[str, Any] = original_model(_snake_case ).last_hidden_state
lowercase__ : Optional[int] = original_model(_snake_case ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase__ : List[str] = {'''type''': scaling_type, '''factor''': 10.0}
lowercase__ : Dict = OpenLlamaModel(_snake_case )
scaled_model.to(_snake_case )
scaled_model.eval()
lowercase__ : Union[str, Any] = scaled_model(_snake_case ).last_hidden_state
lowercase__ : Dict = scaled_model(_snake_case ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_snake_case ,_snake_case ,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_snake_case ,_snake_case ,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_snake_case ,_snake_case ,atol=1e-5 ) )
| 302 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase_ = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['EfficientNetImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 302 | 1 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class __lowerCAmelCase ( nn.Module ):
snake_case_ : str = 42
snake_case_ : Optional[int] = 42
snake_case_ : Optional[Any] = 0.0
snake_case_ : int = 1
snake_case_ : List[str] = 1
snake_case_ : Union[str, Any] = True
snake_case_ : str = False
snake_case_ : Dict = False
snake_case_ : Optional[Any] = False
snake_case_ : Optional[int] = jnp.floataa
def UpperCamelCase ( self : str ):
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
for i in range(self.num_layers ):
_UpperCAmelCase = self.in_channels if i == 0 else self.out_channels
_UpperCAmelCase = FlaxResnetBlockaD(
in_channels=_a , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_a )
_UpperCAmelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_a )
_UpperCAmelCase = resnets
_UpperCAmelCase = attentions
if self.add_downsample:
_UpperCAmelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[Any]=True ):
"""simple docstring"""
_UpperCAmelCase = ()
for resnet, attn in zip(self.resnets , self.attentions ):
_UpperCAmelCase = resnet(_a , _a , deterministic=_a )
_UpperCAmelCase = attn(_a , _a , deterministic=_a )
output_states += (hidden_states,)
if self.add_downsample:
_UpperCAmelCase = self.downsamplers_a(_a )
output_states += (hidden_states,)
return hidden_states, output_states
class __lowerCAmelCase ( nn.Module ):
snake_case_ : List[str] = 42
snake_case_ : Dict = 42
snake_case_ : str = 0.0
snake_case_ : Union[str, Any] = 1
snake_case_ : Tuple = True
snake_case_ : List[str] = jnp.floataa
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
_UpperCAmelCase = []
for i in range(self.num_layers ):
_UpperCAmelCase = self.in_channels if i == 0 else self.out_channels
_UpperCAmelCase = FlaxResnetBlockaD(
in_channels=_a , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_a )
_UpperCAmelCase = resnets
if self.add_downsample:
_UpperCAmelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Dict , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Union[str, Any]=True ):
"""simple docstring"""
_UpperCAmelCase = ()
for resnet in self.resnets:
_UpperCAmelCase = resnet(_a , _a , deterministic=_a )
output_states += (hidden_states,)
if self.add_downsample:
_UpperCAmelCase = self.downsamplers_a(_a )
output_states += (hidden_states,)
return hidden_states, output_states
class __lowerCAmelCase ( nn.Module ):
snake_case_ : Dict = 42
snake_case_ : List[str] = 42
snake_case_ : List[Any] = 42
snake_case_ : List[Any] = 0.0
snake_case_ : str = 1
snake_case_ : Tuple = 1
snake_case_ : Optional[Any] = True
snake_case_ : Any = False
snake_case_ : int = False
snake_case_ : Union[str, Any] = False
snake_case_ : Optional[Any] = jnp.floataa
def UpperCamelCase ( self : int ):
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
for i in range(self.num_layers ):
_UpperCAmelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_UpperCAmelCase = self.prev_output_channel if i == 0 else self.out_channels
_UpperCAmelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_a )
_UpperCAmelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_a )
_UpperCAmelCase = resnets
_UpperCAmelCase = attentions
if self.add_upsample:
_UpperCAmelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : List[str]=True ):
"""simple docstring"""
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
_UpperCAmelCase = res_hidden_states_tuple[-1]
_UpperCAmelCase = res_hidden_states_tuple[:-1]
_UpperCAmelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_UpperCAmelCase = resnet(_a , _a , deterministic=_a )
_UpperCAmelCase = attn(_a , _a , deterministic=_a )
if self.add_upsample:
_UpperCAmelCase = self.upsamplers_a(_a )
return hidden_states
class __lowerCAmelCase ( nn.Module ):
snake_case_ : Optional[int] = 42
snake_case_ : Optional[int] = 42
snake_case_ : Optional[int] = 42
snake_case_ : List[Any] = 0.0
snake_case_ : Optional[int] = 1
snake_case_ : Any = True
snake_case_ : str = jnp.floataa
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
_UpperCAmelCase = []
for i in range(self.num_layers ):
_UpperCAmelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_UpperCAmelCase = self.prev_output_channel if i == 0 else self.out_channels
_UpperCAmelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_a )
_UpperCAmelCase = resnets
if self.add_upsample:
_UpperCAmelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : str , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : str=True ):
"""simple docstring"""
for resnet in self.resnets:
# pop res hidden states
_UpperCAmelCase = res_hidden_states_tuple[-1]
_UpperCAmelCase = res_hidden_states_tuple[:-1]
_UpperCAmelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_UpperCAmelCase = resnet(_a , _a , deterministic=_a )
if self.add_upsample:
_UpperCAmelCase = self.upsamplers_a(_a )
return hidden_states
class __lowerCAmelCase ( nn.Module ):
snake_case_ : List[Any] = 42
snake_case_ : List[str] = 0.0
snake_case_ : str = 1
snake_case_ : Optional[int] = 1
snake_case_ : Optional[Any] = False
snake_case_ : int = False
snake_case_ : Union[str, Any] = jnp.floataa
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
_UpperCAmelCase = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
_UpperCAmelCase = []
for _ in range(self.num_layers ):
_UpperCAmelCase = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_a )
_UpperCAmelCase = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_a )
_UpperCAmelCase = resnets
_UpperCAmelCase = attentions
def __call__( self : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : str=True ):
"""simple docstring"""
_UpperCAmelCase = self.resnets[0](_a , _a )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
_UpperCAmelCase = attn(_a , _a , deterministic=_a )
_UpperCAmelCase = resnet(_a , _a , deterministic=_a )
return hidden_states
| 133 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class A__ ( A__ , A__ ):
@register_to_config
def __init__( self : Dict , _a : int = 768 , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
_SCREAMING_SNAKE_CASE =nn.Parameter(torch.zeros(1 , _a ) )
_SCREAMING_SNAKE_CASE =nn.Parameter(torch.ones(1 , _a ) )
def A ( self : Tuple , _a : Optional[Union[str, torch.device]] = None , _a : Optional[torch.dtype] = None , ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =nn.Parameter(self.mean.to(_a ).to(_a ) )
_SCREAMING_SNAKE_CASE =nn.Parameter(self.std.to(_a ).to(_a ) )
return self
def A ( self : Tuple , _a : str ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =(embeds - self.mean) * 1.0 / self.std
return embeds
def A ( self : List[str] , _a : Optional[Any] ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =(embeds * self.std) + self.mean
return embeds
| 47 | 0 |
lowercase_ = 8.314_462 # Unit - J mol-1 K-1
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ):
'''simple docstring'''
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ):
'''simple docstring'''
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
| 20 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = "▁"
lowercase_ = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
lowercase_ = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
lowercase_ = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
lowercase_ = {
"ernie-m-base": 5_14,
"ernie-m-large": 5_14,
}
lowercase_ = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : List[str] = ["input_ids"]
A : Tuple = VOCAB_FILES_NAMES
A : List[Any] = PRETRAINED_INIT_CONFIGURATION
A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
A : Optional[Any] = RESOURCE_FILES_NAMES
def __init__( self : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Dict=False , _lowerCAmelCase : List[Any]="utf8" , _lowerCAmelCase : Optional[Any]="[UNK]" , _lowerCAmelCase : Optional[int]="[SEP]" , _lowerCAmelCase : List[str]="[PAD]" , _lowerCAmelCase : Dict="[CLS]" , _lowerCAmelCase : List[Any]="[MASK]" , _lowerCAmelCase : Optional[Dict[str, Any]] = None , **_lowerCAmelCase : Any , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
__snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , vocab_file=_lowerCAmelCase , encoding=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
__snake_case : List[Any] = do_lower_case
__snake_case : Any = sentencepiece_model_ckpt
__snake_case : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCAmelCase )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
__snake_case : int = self.load_vocab(filepath=_lowerCAmelCase )
else:
__snake_case : Tuple = {self.sp_model.id_to_piece(_lowerCAmelCase ): id for id in range(self.sp_model.get_piece_size() )}
__snake_case : str = {v: k for k, v in self.vocab.items()}
def snake_case__ ( self : List[Any] , _lowerCAmelCase : List[Any] ):
if text is None:
return None
__snake_case : List[Any] = self.tokenize(_lowerCAmelCase )
__snake_case , __snake_case : Optional[Any] = """""", []
for i, ch in enumerate(_lowerCAmelCase ):
if ch in self.SP_CHAR_MAPPING:
__snake_case : Any = self.SP_CHAR_MAPPING.get(_lowerCAmelCase )
else:
__snake_case : Dict = unicodedata.normalize("""NFKC""" , _lowerCAmelCase )
if self.is_whitespace(_lowerCAmelCase ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_lowerCAmelCase ) )
__snake_case , __snake_case , __snake_case : str = normalized_text, [], 0
if self.do_lower_case:
__snake_case : int = text.lower()
for token in split_tokens:
if token[:1] == "▁":
__snake_case : int = token[1:]
__snake_case : Optional[int] = text[offset:].index(_lowerCAmelCase ) + offset
__snake_case : int = start + len(_lowerCAmelCase )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
__snake_case : str = end
return token_mapping
@property
def snake_case__ ( self : Tuple ):
return len(self.vocab )
def snake_case__ ( self : Dict ):
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : int ):
__snake_case : str = self.__dict__.copy()
__snake_case : Union[str, Any] = None
return state
def __setstate__( self : Optional[Any] , _lowerCAmelCase : List[str] ):
__snake_case : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__snake_case : Optional[Any] = {}
__snake_case : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def snake_case__ ( self : str , _lowerCAmelCase : Optional[int] ):
return "".join((self.SP_CHAR_MAPPING.get(_lowerCAmelCase , _lowerCAmelCase ) for c in text) )
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Tuple=False , _lowerCAmelCase : Optional[Any]=64 , _lowerCAmelCase : Optional[int]=0.1 ):
if self.sp_model_kwargs.get("""enable_sampling""" ) is True:
__snake_case : List[str] = True
if self.sp_model_kwargs.get("""alpha""" ) is not None:
__snake_case : Dict = self.sp_model_kwargs.get("""alpha""" )
if self.sp_model_kwargs.get("""nbest_size""" ) is not None:
__snake_case : List[Any] = self.sp_model_kwargs.get("""nbest_size""" )
if not enable_sampling:
__snake_case : str = self.sp_model.EncodeAsPieces(_lowerCAmelCase )
else:
__snake_case : Tuple = self.sp_model.SampleEncodeAsPieces(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__snake_case : Union[str, Any] = []
for pi, piece in enumerate(_lowerCAmelCase ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_lowerCAmelCase ) and pi != 0:
new_pieces.append(_lowerCAmelCase )
continue
else:
continue
__snake_case : Optional[int] = 0
for i, chunk in enumerate(_lowerCAmelCase ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_lowerCAmelCase ) or self.is_punct(_lowerCAmelCase ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_lowerCAmelCase )
__snake_case : Tuple = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__snake_case : List[str] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__snake_case : Tuple = i
if len(_lowerCAmelCase ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : int ):
__snake_case : int = """""".join(_lowerCAmelCase ).replace(_lowerCAmelCase , """ """ ).strip()
return out_string
def snake_case__ ( self : Dict , _lowerCAmelCase : Tuple ):
__snake_case : int = self.convert_ids_to_tokens(_lowerCAmelCase )
__snake_case : Any = """""".join(_lowerCAmelCase ).replace(_lowerCAmelCase , """ """ ).strip()
return out_string
def snake_case__ ( self : Dict , _lowerCAmelCase : Tuple ):
return self.vocab.get(_lowerCAmelCase , self.vocab.get(self.unk_token ) )
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : Dict ):
return self.reverse_vocab.get(_lowerCAmelCase , self.unk_token )
def snake_case__ ( self : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any]=None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__snake_case : Union[str, Any] = [self.cls_token_id]
__snake_case : Optional[Any] = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def snake_case__ ( self : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any]=None ):
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Optional[Any]=False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1]
return [1] + ([0] * len(_lowerCAmelCase )) + [1]
def snake_case__ ( self : Optional[int] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_lowerCAmelCase ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_lowerCAmelCase ) + 1) + [1] * (len(_lowerCAmelCase ) + 3)
def snake_case__ ( self : Tuple , _lowerCAmelCase : List[str] ):
if "\u4e00" <= char <= "\u9fff":
return True
return False
def snake_case__ ( self : List[str] , _lowerCAmelCase : Any ):
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def snake_case__ ( self : int , _lowerCAmelCase : List[Any] ):
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def snake_case__ ( self : str , _lowerCAmelCase : Optional[Any] ):
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_lowerCAmelCase ) == 1:
__snake_case : Dict = unicodedata.category(_lowerCAmelCase )
if cat == "Zs":
return True
return False
def snake_case__ ( self : str , _lowerCAmelCase : List[Any] ):
__snake_case : Dict = {}
with io.open(_lowerCAmelCase , """r""" , encoding="""utf-8""" ) as f:
for index, line in enumerate(_lowerCAmelCase ):
__snake_case : Tuple = line.rstrip("""\n""" )
__snake_case : List[str] = int(_lowerCAmelCase )
return token_to_idx
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
__snake_case : Optional[int] = 0
if os.path.isdir(_lowerCAmelCase ):
__snake_case : int = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
__snake_case : Optional[Any] = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda _lowerCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
""" Please check that the vocabulary is not corrupted!""" )
__snake_case : Union[str, Any] = token_index
writer.write(token + """\n""" )
index += 1
__snake_case : List[Any] = os.path.join(_lowerCAmelCase , """sentencepiece.bpe.model""" )
with open(_lowerCAmelCase , """wb""" ) as fi:
__snake_case : List[Any] = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (vocab_file,)
| 20 | 1 |
from sklearn.metrics import fa_score
import datasets
__UpperCamelCase : List[str] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
__UpperCamelCase : Dict = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
__UpperCamelCase : Tuple = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , )
def lowerCamelCase__ ( self :Any , __magic_name__ :str , __magic_name__ :Any , __magic_name__ :Optional[Any]=None , __magic_name__ :Optional[Any]=1 , __magic_name__ :Union[str, Any]="binary" , __magic_name__ :List[str]=None ):
'''simple docstring'''
a = fa_score(
__UpperCAmelCase , __UpperCAmelCase , labels=__UpperCAmelCase , pos_label=__UpperCAmelCase , average=__UpperCAmelCase , sample_weight=__UpperCAmelCase )
return {"f1": float(__UpperCAmelCase ) if score.size == 1 else score}
| 228 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__A = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["""ReformerTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["""ReformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"""REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ReformerAttention""",
"""ReformerForMaskedLM""",
"""ReformerForQuestionAnswering""",
"""ReformerForSequenceClassification""",
"""ReformerLayer""",
"""ReformerModel""",
"""ReformerModelWithLMHead""",
"""ReformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 293 | 0 |
from typing import Dict
from .base import GenericTensor, Pipeline
class __a ( __UpperCamelCase ):
def A ( self : str , UpperCAmelCase : str=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : str=None , **UpperCAmelCase : int ):
if tokenize_kwargs is None:
lowerCAmelCase_ : Dict = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
"""truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)""" )
lowerCAmelCase_ : Union[str, Any] = truncation
lowerCAmelCase_ : List[str] = tokenize_kwargs
lowerCAmelCase_ : str = {}
if return_tensors is not None:
lowerCAmelCase_ : Any = return_tensors
return preprocess_params, {}, postprocess_params
def A ( self : List[str] , UpperCAmelCase : List[Any] , **UpperCAmelCase : Dict ):
lowerCAmelCase_ : List[str] = self.framework
lowerCAmelCase_ : List[Any] = self.tokenizer(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
return model_inputs
def A ( self : Any , UpperCAmelCase : Union[str, Any] ):
lowerCAmelCase_ : str = self.model(**UpperCAmelCase )
return model_outputs
def A ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any]=False ):
# [0] is the first available tensor, logits or last_hidden_state.
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self : Union[str, Any] , *UpperCAmelCase : Any , **UpperCAmelCase : List[Any] ):
return super().__call__(*UpperCAmelCase , **UpperCAmelCase )
| 354 |
from __future__ import annotations
from typing import Any
class __a :
def __init__( self : Dict , UpperCAmelCase : int = 6 ):
lowerCAmelCase_ : Node | None = None
lowerCAmelCase_ : Node | None = None
self.create_linked_list(UpperCAmelCase )
def A ( self : Union[str, Any] , UpperCAmelCase : int ):
lowerCAmelCase_ : Any = Node()
lowerCAmelCase_ : int = current_node
lowerCAmelCase_ : str = current_node
lowerCAmelCase_ : Union[str, Any] = current_node
for _ in range(1 , UpperCAmelCase ):
lowerCAmelCase_ : Any = Node()
lowerCAmelCase_ : Dict = current_node
lowerCAmelCase_ : Optional[int] = previous_node
lowerCAmelCase_ : Optional[Any] = current_node
lowerCAmelCase_ : List[str] = self.front
lowerCAmelCase_ : Optional[int] = previous_node
def A ( self : Any ):
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def A ( self : List[str] ):
self.check_can_perform_operation()
return self.front.data if self.front else None
def A ( self : Optional[int] , UpperCAmelCase : Any ):
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
lowerCAmelCase_ : int = self.rear.next
if self.rear:
lowerCAmelCase_ : Union[str, Any] = data
def A ( self : List[Any] ):
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
lowerCAmelCase_ : int = self.front.data
lowerCAmelCase_ : Optional[Any] = None
return data
lowerCAmelCase_ : Optional[int] = self.front
lowerCAmelCase_ : Any = old_front.next
lowerCAmelCase_ : Tuple = old_front.data
lowerCAmelCase_ : str = None
return data
def A ( self : Tuple ):
if self.is_empty():
raise Exception("""Empty Queue""" )
def A ( self : List[str] ):
if self.rear and self.rear.next == self.front:
raise Exception("""Full Queue""" )
class __a :
def __init__( self : Any ):
lowerCAmelCase_ : Any | None = None
lowerCAmelCase_ : Node | None = None
lowerCAmelCase_ : Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 | 0 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __magic_name__ :
def __init__( self : Optional[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : int=3 , lowerCamelCase__ : Any=32 , lowerCamelCase__ : Optional[Any]=3 , lowerCamelCase__ : Dict=10 , lowerCamelCase__ : Tuple=[10, 20, 30, 40] , lowerCamelCase__ : int=[1, 1, 2, 1] , lowerCamelCase__ : str=True , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : int="relu" , lowerCamelCase__ : Union[str, Any]=3 , lowerCamelCase__ : List[str]=None , ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ : List[Any] = parent
UpperCamelCase__ : List[Any] = batch_size
UpperCamelCase__ : int = image_size
UpperCamelCase__ : List[str] = num_channels
UpperCamelCase__ : Optional[Any] = embeddings_size
UpperCamelCase__ : str = hidden_sizes
UpperCamelCase__ : str = depths
UpperCamelCase__ : Optional[int] = is_training
UpperCamelCase__ : List[Any] = use_labels
UpperCamelCase__ : str = hidden_act
UpperCamelCase__ : str = num_labels
UpperCamelCase__ : Union[str, Any] = scope
UpperCamelCase__ : List[Any] = len(__lowerCAmelCase )
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ : Optional[int] = None
if self.use_labels:
UpperCamelCase__ : Dict = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase__ : Dict = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple , lowerCamelCase__ : List[Any] ) -> str:
'''simple docstring'''
UpperCamelCase__ : List[str] = TFResNetModel(config=__lowerCAmelCase )
UpperCamelCase__ : Optional[Any] = model(__lowerCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase__ ( self : str , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[Any] ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ : int = self.num_labels
UpperCamelCase__ : Tuple = TFResNetForImageClassification(__lowerCAmelCase )
UpperCamelCase__ : Tuple = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : Dict ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ : Tuple = self.prepare_config_and_inputs()
UpperCamelCase__ : Optional[Any] = config_and_inputs
UpperCamelCase__ : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __magic_name__ ( a__ , a__ , unittest.TestCase):
A: Dict = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
A: Dict = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
A: str = False
A: str = False
A: Union[str, Any] = False
A: Optional[int] = False
A: Any = False
def UpperCAmelCase__ ( self : int ) -> Dict:
'''simple docstring'''
UpperCamelCase__ : Any = TFResNetModelTester(self )
UpperCamelCase__ : Dict = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase )
def UpperCAmelCase__ ( self : int ) -> List[Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__ ( self : Dict ) -> Any:
'''simple docstring'''
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
def UpperCAmelCase__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Tuple = model_class(__lowerCAmelCase )
UpperCamelCase__ : Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : Optional[Any] = [*signature.parameters.keys()]
UpperCamelCase__ : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def UpperCAmelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def UpperCAmelCase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase__ : List[str] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] ):
UpperCamelCase__ : Optional[int] = model_class(__lowerCAmelCase )
UpperCamelCase__ : Tuple = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
UpperCamelCase__ : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase__ : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(__lowerCAmelCase ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ : Union[str, Any] = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCamelCase__ : str = layer_type
UpperCamelCase__ : Optional[int] = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ : int = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : str = TFResNetModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def _a ( ):
"""simple docstring"""
UpperCamelCase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __magic_name__ ( unittest.TestCase):
@cached_property
def UpperCAmelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCamelCase__ : Any = self.default_image_processor
UpperCamelCase__ : List[Any] = prepare_img()
UpperCamelCase__ : Optional[int] = image_processor(images=__lowerCAmelCase , return_tensors='''tf''' )
# forward pass
UpperCamelCase__ : Optional[Any] = model(**__lowerCAmelCase )
# verify the logits
UpperCamelCase__ : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
UpperCamelCase__ : Any = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __lowerCAmelCase , atol=1E-4 ) )
| 146 |
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class UpperCAmelCase ( pl.LightningModule ):
'''simple docstring'''
def __init__( self , __lowerCAmelCase ) -> List[str]:
super().__init__()
lowercase__ : List[str] = model
lowercase__ : Dict = 2
lowercase__ : Any = nn.Linear(self.model.config.hidden_size , self.num_labels )
def _lowerCAmelCase( self ) -> str:
pass
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
# load longformer model from model identifier
lowercase__ : Dict = LongformerModel.from_pretrained(UpperCAmelCase )
lowercase__ : List[str] = LightningModel(UpperCAmelCase )
lowercase__ : List[Any] = torch.load(UpperCAmelCase , map_location=torch.device('''cpu''' ) )
lightning_model.load_state_dict(ckpt['''state_dict'''] )
# init longformer question answering model
lowercase__ : Optional[int] = LongformerForQuestionAnswering.from_pretrained(UpperCAmelCase )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(UpperCAmelCase )
print(F"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
__a: List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--longformer_model""",
default=None,
type=str,
required=True,
help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
)
parser.add_argument(
"""--longformer_question_answering_ckpt_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch Lightning Checkpoint.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__a: Tuple = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 198 | 0 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
__UpperCamelCase : List[str] = "__DUMMY_TRANSFORMERS_USER__"
__UpperCamelCase : Optional[int] = "Dummy User"
__UpperCamelCase : Union[str, Any] = "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"
__UpperCamelCase : Dict = "https://hub-ci.huggingface.co"
__UpperCamelCase : Tuple = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
__UpperCamelCase : Union[str, Any] = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
__UpperCamelCase : Any = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def __A ( __lowerCamelCase ) -> List[Any]:
monkeypatch.setattr(
"""huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , __lowerCamelCase )
@pytest.fixture
def __A ( __lowerCamelCase ) -> Dict:
monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , __lowerCamelCase )
monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , __lowerCamelCase )
@pytest.fixture
def __A ( __lowerCamelCase ) -> Optional[Any]:
monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , __lowerCamelCase )
@pytest.fixture
def __A ( __lowerCamelCase , __lowerCamelCase ) -> Dict:
HfFolder.save_token(__lowerCamelCase )
yield
HfFolder.delete_token()
@pytest.fixture(scope="""session""" )
def __A ( ) -> Any:
return HfApi(endpoint=__lowerCamelCase )
@pytest.fixture(scope="""session""" )
def __A ( __lowerCamelCase ) -> Union[str, Any]:
a = HfFolder.get_token()
HfFolder.save_token(__lowerCamelCase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(__lowerCamelCase )
@pytest.fixture
def __A ( __lowerCamelCase ) -> Dict:
def _cleanup_repo(__lowerCamelCase ):
hf_api.delete_repo(__lowerCamelCase , token=__lowerCamelCase , repo_type="""dataset""" )
return _cleanup_repo
@pytest.fixture
def __A ( __lowerCamelCase ) -> Tuple:
@contextmanager
def _temporary_repo(__lowerCamelCase ):
try:
yield repo_id
finally:
cleanup_repo(__lowerCamelCase )
return _temporary_repo
@pytest.fixture(scope="""session""" )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
a = f'repo_txt_data-{int(time.time() * 1_0E3 )}'
a = f'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(__lowerCamelCase , token=__lowerCamelCase , repo_type="""dataset""" , private=__lowerCamelCase )
hf_api.upload_file(
token=__lowerCamelCase , path_or_fileobj=str(__lowerCamelCase ) , path_in_repo="""data/text_data.txt""" , repo_id=__lowerCamelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(__lowerCamelCase , token=__lowerCamelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[str]:
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="""session""" )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[str]:
a = f'repo_zipped_txt_data-{int(time.time() * 1_0E3 )}'
a = f'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(__lowerCamelCase , token=__lowerCamelCase , repo_type="""dataset""" , private=__lowerCamelCase )
hf_api.upload_file(
token=__lowerCamelCase , path_or_fileobj=str(__lowerCamelCase ) , path_in_repo="""data.zip""" , repo_id=__lowerCamelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(__lowerCamelCase , token=__lowerCamelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="""session""" )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
a = f'repo_zipped_img_data-{int(time.time() * 1_0E3 )}'
a = f'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(__lowerCamelCase , token=__lowerCamelCase , repo_type="""dataset""" , private=__lowerCamelCase )
hf_api.upload_file(
token=__lowerCamelCase , path_or_fileobj=str(__lowerCamelCase ) , path_in_repo="""data.zip""" , repo_id=__lowerCamelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(__lowerCamelCase , token=__lowerCamelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
return hf_private_dataset_repo_zipped_img_data_
| 347 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase : Optional[int] = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Union[str, Any] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
__UpperCamelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 347 | 1 |
import math
import sys
import cva
import numpy as np
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : float ):
"""simple docstring"""
__a = math.sqrt(_SCREAMING_SNAKE_CASE )
__a = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
__a = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : float ):
"""simple docstring"""
__a = np.zeros((kernel_size, kernel_size) )
for i in range(0 , _SCREAMING_SNAKE_CASE ):
for j in range(0 , _SCREAMING_SNAKE_CASE ):
__a = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : int , ):
"""simple docstring"""
__a = np.zeros(img.shape )
__a = get_gauss_kernel(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a , __a = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
__a = get_slice(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a = img_s - img_s[kernel_size // 2, kernel_size // 2]
__a = vec_gaussian(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a = np.multiply(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a = np.multiply(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a = np.sum(_SCREAMING_SNAKE_CASE ) / np.sum(_SCREAMING_SNAKE_CASE )
__a = val
return imga
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : list ):
"""simple docstring"""
__a = args[1] if args[1:] else """../image_data/lena.jpg"""
__a = float(args[2] ) if args[2:] else 1.0
__a = float(args[3] ) if args[3:] else 1.0
if args[4:]:
__a = int(args[4] )
__a = kernel_size + abs(kernel_size % 2 - 1 )
else:
__a = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = parse_args(sys.argv)
lowerCamelCase__ = cva.imread(filename, 0)
cva.imshow("""input image""", img)
lowerCamelCase__ = img / 255
lowerCamelCase__ = out.astype("""float32""")
lowerCamelCase__ = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
lowerCamelCase__ = out * 255
lowerCamelCase__ = np.uinta(out)
cva.imshow("""output image""", out)
cva.waitKey(0)
cva.destroyAllWindows()
| 302 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""huggingface/autoformer-tourism-monthly""": """https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : List[Any] ='autoformer'
__lowerCamelCase : str ={
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self : List[Any] , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , __lowercase : str = "student_t" , __lowercase : str = "nll" , __lowercase : int = 1 , __lowercase : List[int] = [1, 2, 3, 4, 5, 6, 7] , __lowercase : bool = True , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : Optional[List[int]] = None , __lowercase : Optional[List[int]] = None , __lowercase : int = 64 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 32 , __lowercase : int = 32 , __lowercase : str = "gelu" , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : int = 100 , __lowercase : float = 0.02 , __lowercase : bool = True , __lowercase : List[Any]=True , __lowercase : int = 10 , __lowercase : int = 25 , __lowercase : int = 3 , **__lowercase : Optional[int] , ):
'''simple docstring'''
# time series specific configuration
__a = prediction_length
__a = context_length if context_length is not None else prediction_length
__a = distribution_output
__a = loss
__a = input_size
__a = num_time_features
__a = lags_sequence
__a = scaling
__a = num_dynamic_real_features
__a = num_static_real_features
__a = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(__lowercase ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
__a = cardinality
else:
__a = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(__lowercase ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
__a = embedding_dimension
else:
__a = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__a = num_parallel_samples
# Transformer architecture configuration
__a = input_size * len(self.lags_sequence ) + self._number_of_features
__a = d_model
__a = encoder_attention_heads
__a = decoder_attention_heads
__a = encoder_ffn_dim
__a = decoder_ffn_dim
__a = encoder_layers
__a = decoder_layers
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = activation_function
__a = init_std
__a = use_cache
# Autoformer
__a = label_length
__a = moving_average
__a = autocorrelation_factor
super().__init__(is_encoder_decoder=__lowercase , **__lowercase )
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 302 | 1 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class __snake_case :
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase : List[str] =TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase : Dict =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase : Union[str, Any] =UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCAmelCase : List[Any] =DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , thresholding=snake_case__ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
UpperCAmelCase : List[str] =IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase : Optional[int] =TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase : Dict =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase : List[str] =UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.414 , time_embedding_act_fn='''gelu''' , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCAmelCase : List[Any] =DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , thresholding=snake_case__ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
UpperCAmelCase : Tuple =DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
UpperCAmelCase : List[str] =IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Optional[int] =self.get_dummy_components()
UpperCAmelCase : List[str] =self.pipeline_class(**snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase : Union[str, Any] =self.get_dummy_inputs(snake_case__ )
UpperCAmelCase : Union[str, Any] =inputs['''prompt''']
UpperCAmelCase : Any =inputs['''generator''']
UpperCAmelCase : Optional[int] =inputs['''num_inference_steps''']
UpperCAmelCase : Tuple =inputs['''output_type''']
if "image" in inputs:
UpperCAmelCase : Union[str, Any] =inputs['''image''']
else:
UpperCAmelCase : Tuple =None
if "mask_image" in inputs:
UpperCAmelCase : Union[str, Any] =inputs['''mask_image''']
else:
UpperCAmelCase : List[str] =None
if "original_image" in inputs:
UpperCAmelCase : Optional[Any] =inputs['''original_image''']
else:
UpperCAmelCase : Optional[int] =None
UpperCAmelCase , UpperCAmelCase : List[Any] =pipe.encode_prompt(snake_case__ )
# inputs with prompt converted to embeddings
UpperCAmelCase : List[Any] ={
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCAmelCase : Any =image
if mask_image is not None:
UpperCAmelCase : Dict =mask_image
if original_image is not None:
UpperCAmelCase : Optional[int] =original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(snake_case__ , snake_case__ , snake_case__ )
UpperCAmelCase : Tuple =pipe(**snake_case__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(snake_case__ )
UpperCAmelCase : Tuple =self.pipeline_class.from_pretrained(snake_case__ )
pipe_loaded.to(snake_case__ )
pipe_loaded.set_progress_bar_config(disable=snake_case__ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(snake_case__ , snake_case__ ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
UpperCAmelCase : Optional[int] =self.get_dummy_inputs(snake_case__ )
UpperCAmelCase : List[Any] =inputs['''generator''']
UpperCAmelCase : int =inputs['''num_inference_steps''']
UpperCAmelCase : Any =inputs['''output_type''']
# inputs with prompt converted to embeddings
UpperCAmelCase : Union[str, Any] ={
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCAmelCase : List[Any] =image
if mask_image is not None:
UpperCAmelCase : Union[str, Any] =mask_image
if original_image is not None:
UpperCAmelCase : List[Any] =original_image
UpperCAmelCase : Tuple =pipe_loaded(**snake_case__ )[0]
UpperCAmelCase : Dict =np.abs(to_np(snake_case__ ) - to_np(snake_case__ ) ).max()
self.assertLess(snake_case__ , 1e-4 )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] =self.get_dummy_components()
UpperCAmelCase : Dict =self.pipeline_class(**snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase : Tuple =self.get_dummy_inputs(snake_case__ )
UpperCAmelCase : str =pipe(**snake_case__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(snake_case__ )
UpperCAmelCase : Optional[Any] =self.pipeline_class.from_pretrained(snake_case__ )
pipe_loaded.to(snake_case__ )
pipe_loaded.set_progress_bar_config(disable=snake_case__ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
UpperCAmelCase : List[Any] =self.get_dummy_inputs(snake_case__ )
UpperCAmelCase : Optional[Any] =pipe_loaded(**snake_case__ )[0]
UpperCAmelCase : str =np.abs(to_np(snake_case__ ) - to_np(snake_case__ ) ).max()
self.assertLess(snake_case__ , 1e-4 )
| 78 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case = {
'''configuration_swinv2''': ['''SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Swinv2Config'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Swinv2ForImageClassification''',
'''Swinv2ForMaskedImageModeling''',
'''Swinv2Model''',
'''Swinv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 78 | 1 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : Optional[Any] = logging.get_logger(__name__)
lowercase : List[Any] = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class __snake_case ( lowerCAmelCase ):
_a : int= "encodec"
def __init__( self ,snake_case=[1.5, 3.0, 6.0, 12.0, 24.0] ,snake_case=24000 ,snake_case=1 ,snake_case=False ,snake_case=None ,snake_case=None ,snake_case=128 ,snake_case=32 ,snake_case=1 ,snake_case=[8, 5, 4, 2] ,snake_case="weight_norm" ,snake_case=7 ,snake_case=7 ,snake_case=3 ,snake_case=2 ,snake_case=True ,snake_case="reflect" ,snake_case=2 ,snake_case=2 ,snake_case=1.0 ,snake_case=1024 ,snake_case=None ,snake_case=True ,**snake_case ,):
'''simple docstring'''
lowercase : Tuple = target_bandwidths
lowercase : int = sampling_rate
lowercase : List[str] = audio_channels
lowercase : Tuple = normalize
lowercase : Optional[Any] = chunk_length_s
lowercase : List[str] = overlap
lowercase : List[Any] = hidden_size
lowercase : Tuple = num_filters
lowercase : Dict = num_residual_layers
lowercase : str = upsampling_ratios
lowercase : str = norm_type
lowercase : List[Any] = kernel_size
lowercase : Tuple = last_kernel_size
lowercase : Any = residual_kernel_size
lowercase : Union[str, Any] = dilation_growth_rate
lowercase : Union[str, Any] = use_causal_conv
lowercase : int = pad_mode
lowercase : List[str] = compress
lowercase : List[str] = num_lstm_layers
lowercase : Union[str, Any] = trim_right_ratio
lowercase : List[Any] = codebook_size
lowercase : List[Any] = codebook_dim if codebook_dim is not None else hidden_size
lowercase : Optional[int] = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}" )
super().__init__(**snake_case )
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) )
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[Any] = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 20 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class __snake_case ( lowerCAmelCase ):
_a : BigBirdConfig
_a : jnp.dtype= jnp.floataa
_a : bool= True
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
super().setup()
lowercase : List[str] = nn.Dense(5 ,dtype=self.dtype )
def __call__( self ,*snake_case ,**snake_case ):
'''simple docstring'''
lowercase : int = super().__call__(*snake_case ,**snake_case )
lowercase : Any = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class __snake_case ( lowerCAmelCase ):
_a : List[Any]= FlaxBigBirdForNaturalQuestionsModule
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[str]:
def cross_entropy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
lowercase : int = logits.shape[-1]
lowercase : Dict = (labels[..., None] == jnp.arange(SCREAMING_SNAKE_CASE__ )[None]).astype("""f4""" )
lowercase : Any = jax.nn.log_softmax(SCREAMING_SNAKE_CASE__ , axis=-1 )
lowercase : Optional[Any] = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
lowercase : Any = reduction(SCREAMING_SNAKE_CASE__ )
return loss
lowercase : Optional[Any] = partial(SCREAMING_SNAKE_CASE__ , reduction=jnp.mean )
lowercase : Optional[int] = cross_entropy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Dict = cross_entropy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : int = cross_entropy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class __snake_case :
_a : str= "google/bigbird-roberta-base"
_a : int= 3000
_a : int= 1_0500
_a : int= 128
_a : int= 3
_a : int= 1
_a : int= 5
# tx_args
_a : float= 3E-5
_a : float= 0.0
_a : int= 2_0000
_a : float= 0.00_95
_a : str= "bigbird-roberta-natural-questions"
_a : str= "training-expt"
_a : str= "data/nq-training.jsonl"
_a : str= "data/nq-validation.jsonl"
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
os.makedirs(self.base_dir ,exist_ok=snake_case )
lowercase : Optional[int] = os.path.join(self.base_dir ,self.save_dir )
lowercase : Optional[int] = self.batch_size_per_device * jax.device_count()
@dataclass
class __snake_case :
_a : int
_a : int= 4096 # no dynamic padding on TPUs
def __call__( self ,snake_case ):
'''simple docstring'''
lowercase : int = self.collate_fn(snake_case )
lowercase : Union[str, Any] = jax.tree_util.tree_map(snake_case ,snake_case )
return batch
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase , lowercase : Union[str, Any] = self.fetch_inputs(features["""input_ids"""] )
lowercase : Tuple = {
"""input_ids""": jnp.array(snake_case ,dtype=jnp.intaa ),
"""attention_mask""": jnp.array(snake_case ,dtype=jnp.intaa ),
"""start_labels""": jnp.array(features["""start_token"""] ,dtype=jnp.intaa ),
"""end_labels""": jnp.array(features["""end_token"""] ,dtype=jnp.intaa ),
"""pooled_labels""": jnp.array(features["""category"""] ,dtype=jnp.intaa ),
}
return batch
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : Tuple = [self._fetch_inputs(snake_case ) for ids in input_ids]
return zip(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : Union[str, Any] = [1 for _ in range(len(snake_case ) )]
while len(snake_case ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ) -> Any:
if seed is not None:
lowercase : Optional[int] = dataset.shuffle(seed=SCREAMING_SNAKE_CASE__ )
for i in range(len(SCREAMING_SNAKE_CASE__ ) // batch_size ):
lowercase : Optional[Any] = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(SCREAMING_SNAKE_CASE__ )
@partial(jax.pmap , axis_name="""batch""" )
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[Any]:
def loss_fn(SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = model_inputs.pop("""start_labels""" )
lowercase : Optional[int] = model_inputs.pop("""end_labels""" )
lowercase : str = model_inputs.pop("""pooled_labels""" )
lowercase : Union[str, Any] = state.apply_fn(**SCREAMING_SNAKE_CASE__ , params=SCREAMING_SNAKE_CASE__ , dropout_rng=SCREAMING_SNAKE_CASE__ , train=SCREAMING_SNAKE_CASE__ )
lowercase , lowercase , lowercase : List[str] = outputs
return state.loss_fn(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
lowercase , lowercase : int = jax.random.split(SCREAMING_SNAKE_CASE__ )
lowercase : Dict = jax.value_and_grad(SCREAMING_SNAKE_CASE__ )
lowercase , lowercase : Union[str, Any] = grad_fn(state.params )
lowercase : List[Any] = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
lowercase : List[Any] = jax.lax.pmean(SCREAMING_SNAKE_CASE__ , """batch""" )
lowercase : str = state.apply_gradients(grads=SCREAMING_SNAKE_CASE__ )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="""batch""" )
def _snake_case( SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[str]:
lowercase : int = model_inputs.pop("""start_labels""" )
lowercase : Dict = model_inputs.pop("""end_labels""" )
lowercase : Optional[Any] = model_inputs.pop("""pooled_labels""" )
lowercase : Optional[int] = state.apply_fn(**SCREAMING_SNAKE_CASE__ , params=state.params , train=SCREAMING_SNAKE_CASE__ )
lowercase , lowercase , lowercase : List[Any] = outputs
lowercase : Dict = state.loss_fn(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : str = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
return metrics
class __snake_case ( train_state.TrainState ):
_a : Callable= struct.field(pytree_node=lowerCAmelCase )
@dataclass
class __snake_case :
_a : Args
_a : Callable
_a : Callable
_a : Callable
_a : Callable
_a : wandb
_a : Callable= None
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case=None ):
'''simple docstring'''
lowercase : Tuple = model.params
lowercase : Any = TrainState.create(
apply_fn=model.__call__ ,params=snake_case ,tx=snake_case ,loss_fn=snake_case ,)
if ckpt_dir is not None:
lowercase , lowercase , lowercase , lowercase , lowercase : Tuple = restore_checkpoint(snake_case ,snake_case )
lowercase : List[str] = {
"""lr""": args.lr,
"""init_lr""": args.init_lr,
"""warmup_steps""": args.warmup_steps,
"""num_train_steps""": num_train_steps,
"""weight_decay""": args.weight_decay,
}
lowercase , lowercase : Tuple = build_tx(**snake_case )
lowercase : str = train_state.TrainState(
step=snake_case ,apply_fn=model.__call__ ,params=snake_case ,tx=snake_case ,opt_state=snake_case ,)
lowercase : Any = args
        self.data_collator = data_collator
        self.lr = lr
        self.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)

    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")


def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator


def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        # Exclude biases and LayerNorm scales from weight decay; the mask is
        # keyed on the flattened parameter path, not on the parameter values.
        params = traverse_util.flatten_dict(params)
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
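
# A minimal, self-contained probe of the pieces above (added for illustration;
# the hyper-parameter values are assumptions, not from the original training
# runs): `build_tx` wires the warmup + linear-decay schedule into AdamW, and
# the returned schedule can be evaluated at individual steps.
if __name__ == "__main__":
    tx, lr_schedule = build_tx(lr=5e-5, init_lr=0.0, warmup_steps=100, num_train_steps=1000, weight_decay=0.01)
    for step in (0, 50, 100, 500, 999):
        print(step, float(lr_schedule(step)))  # ramps up to 5e-5, then decays toward 1e-7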
| 20 | 1 |
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")


class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)


@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
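
# Minimal round-trip sketch distilled from the tests above (illustrative; the
# repo name and token are assumptions, not part of the test suite):
#
#   processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
#   processor.push_to_hub("my-image-processor", use_auth_token="hf_...")
#   reloaded = ViTImageProcessor.from_pretrained("<user>/my-image-processor")
#   assert processor.to_dict() == reloaded.to_dict()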
| 357 |
import numpy as np

# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu"))

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

    # Part 2 - Fitting the CNN to the images

    # Load Trained model weights

    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    # `fit` accepts generators directly; the legacy `fit_generator` was removed
    # from recent TensorFlow releases.
    classifier.fit(training_set, steps_per_epoch=5, epochs=30, validation_data=test_set)

    classifier.save("cnn.h5")

    # Part 3 - Making new predictions

    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    # Apply the same 1/255 rescaling that the generators used during training.
    test_image = test_image / 255.0
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    # The sigmoid output is a probability, so threshold it instead of comparing
    # for exact equality with 0 or 1.
    if result[0][0] >= 0.5:
        prediction = "Abnormality detected"
    else:
        prediction = "Normal"
| 169 | 0 |
import unittest

from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")


@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]
# fmt: off
_A : Any = {"""input_ids""": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="AI-Sweden/gpt-sw3-126m",
            sequences=sequences,
        )
| 26 |
import inspect
import unittest

from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
    from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
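
# Illustrative inference sketch matching the integration test above (requires
# network access to download the checkpoint, so it is left as a comment):
#
#   processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
#   model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])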
| 28 | 0 |
import json
import os

import torch

from diffusers import UNet1DModel


os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)

os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)


def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")

    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")

    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    # Remap the original checkpoint keys onto the diffusers parameter names.
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)


def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)


if __name__ == "__main__":
    unet(32)
    # unet(128)
    value_function()
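    # Illustrative sanity check (assumes the conversion above just wrote the
    # files): a converted folder holds `config.json` plus
    # `diffusion_pytorch_model.bin`, so diffusers can reload it directly.
    reloaded = UNet1DModel.from_pretrained("hub/hopper-medium-v2/value_function")
    print(f"reloaded {reloaded.__class__.__name__} with {sum(p.numel() for p in reloaded.parameters())} parameters")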
| 8 |
import unittest

import numpy as np

from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
snake_case_ : Tuple = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
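        expected_words = snake_case_  # the OCR words literal assigned above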
snake_case_ : Any = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 
5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
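        expected_boxes = snake_case_  # the OCR bounding-box literal assigned above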
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
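
# Typical pipeline sketch distilled from the test above (illustrative; requires
# Tesseract to be installed for the OCR step):
#
#   processor = LayoutLMv3ImageProcessor()            # apply_ocr defaults to True
#   encoding = processor(image, return_tensors="pt")  # pixel_values + OCR words/boxes
#   # encoding.words / encoding.boxes feed the LayoutLMv3 tokenizer downstream.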
| 8 | 1 |
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
__SCREAMING_SNAKE_CASE : List[str] = '__DUMMY_TRANSFORMERS_USER__'
__SCREAMING_SNAKE_CASE : Dict = 'Dummy User'
__SCREAMING_SNAKE_CASE : Tuple = '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'
__SCREAMING_SNAKE_CASE : Optional[Any] = 'https://hub-ci.huggingface.co'
__SCREAMING_SNAKE_CASE : List[str] = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}'
__SCREAMING_SNAKE_CASE : Optional[int] = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}'
__SCREAMING_SNAKE_CASE : Union[str, Any] = Path('~/.huggingface/hub_ci_token').expanduser()
@pytest.fixture
def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]:
monkeypatch.setattr(
"""huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , _SCREAMING_SNAKE_CASE )
@pytest.fixture
def _a ( _SCREAMING_SNAKE_CASE ) -> str:
monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , _SCREAMING_SNAKE_CASE )
monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , _SCREAMING_SNAKE_CASE )
@pytest.fixture
def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , _SCREAMING_SNAKE_CASE )
@pytest.fixture
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
HfFolder.save_token(_SCREAMING_SNAKE_CASE )
yield
HfFolder.delete_token()
@pytest.fixture(scope="""session""" )
def _a ( ) -> List[str]:
return HfApi(endpoint=_SCREAMING_SNAKE_CASE )
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
snake_case_ = HfFolder.get_token()
HfFolder.save_token(_SCREAMING_SNAKE_CASE )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(_SCREAMING_SNAKE_CASE )
@pytest.fixture
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
def _cleanup_repo(_SCREAMING_SNAKE_CASE ):
hf_api.delete_repo(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" )
return _cleanup_repo
@pytest.fixture
def _a ( _SCREAMING_SNAKE_CASE ) -> str:
@contextmanager
def _temporary_repo(_SCREAMING_SNAKE_CASE ):
try:
yield repo_id
finally:
cleanup_repo(_SCREAMING_SNAKE_CASE )
return _temporary_repo
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = f"""repo_txt_data-{int(time.time() * 1_0E3 )}"""
snake_case_ = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" , private=_SCREAMING_SNAKE_CASE )
hf_api.upload_file(
token=_SCREAMING_SNAKE_CASE , path_or_fileobj=str(_SCREAMING_SNAKE_CASE ) , path_in_repo="""data/text_data.txt""" , repo_id=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
snake_case_ = f"""repo_zipped_txt_data-{int(time.time() * 1_0E3 )}"""
snake_case_ = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" , private=_SCREAMING_SNAKE_CASE )
hf_api.upload_file(
token=_SCREAMING_SNAKE_CASE , path_or_fileobj=str(_SCREAMING_SNAKE_CASE ) , path_in_repo="""data.zip""" , repo_id=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
snake_case_ = f"""repo_zipped_img_data-{int(time.time() * 1_0E3 )}"""
snake_case_ = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" , private=_SCREAMING_SNAKE_CASE )
hf_api.upload_file(
token=_SCREAMING_SNAKE_CASE , path_or_fileobj=str(_SCREAMING_SNAKE_CASE ) , path_in_repo="""data.zip""" , repo_id=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
return hf_private_dataset_repo_zipped_img_data_
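
# Illustrative consumer of the fixtures above (a sketch, not part of the
# module; left as a comment so pytest does not collect it against the CI hub):
#
#   def test_upload_to_temporary_repo(temporary_repo, hf_api, hf_token):
#       repo_id = f"{CI_HUB_USER}/repo-{int(time.time() * 10e3)}"
#       hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset")
#       with temporary_repo(repo_id):
#           ...  # interact with the repo; it is deleted when the block exits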
| 347 |
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__)
def load_and_quantize_model(
    model: torch.nn.Module,
    bnb_quantization_config: BnbQuantizationConfig,
    weights_location: Union[str, os.PathLike] = None,
    device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
    no_split_module_classes: Optional[List[str]] = None,
    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
    offload_folder: Optional[Union[str, os.PathLike]] = None,
    offload_state_dict: bool = False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model

    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )

    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )

        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
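
# Usage sketch (illustrative, not part of this module; the model class and the
# checkpoint path below are assumptions):
#
#   from accelerate import init_empty_weights
#   from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#   with init_empty_weights():
#       empty_model = MyModel(config)               # hypothetical model class
#   quantized = load_and_quantize_model(
#       empty_model,
#       bnb_config,
#       weights_location="path/to/checkpoint",      # hypothetical path
#       device_map="auto",
#   )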
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> List[Any]:
snake_case_ = False
for name, module in model.named_children():
if current_key_name is None:
snake_case_ = []
current_key_name.append(_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
snake_case_ = """.""".join(_SCREAMING_SNAKE_CASE )
snake_case_ = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
snake_case_ = False
break
if proceed:
# Load the bnb module with empty weights and replace the `nn.Linear` module
if bnb_quantization_config.load_in_abit:
snake_case_ = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
snake_case_ = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
snake_case_ = module.weight.data
if module.bias is not None:
snake_case_ = module.bias.data
bnb_module.requires_grad_(_SCREAMING_SNAKE_CASE )
setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = True
if len(list(module.children() ) ) > 0:
snake_case_ , snake_case_ = _replace_with_bnb_layers(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _a ( _SCREAMING_SNAKE_CASE ) -> Any:
# Create a copy of the model
with init_empty_weights():
snake_case_ = deepcopy(_SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
snake_case_ = find_tied_parameters(_SCREAMING_SNAKE_CASE )
# For compatibility with Accelerate < 0.18
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
snake_case_ = sum(_SCREAMING_SNAKE_CASE , [] )
snake_case_ = len(_SCREAMING_SNAKE_CASE ) > 0
# Check if it is a base model
snake_case_ = False
if hasattr(_SCREAMING_SNAKE_CASE , """base_model_prefix""" ):
snake_case_ = not hasattr(_SCREAMING_SNAKE_CASE , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
snake_case_ = list(model.named_children() )
snake_case_ = [list_modules[-1][0]]
# add last module together with tied weights
snake_case_ = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE )
snake_case_ = list(set(_SCREAMING_SNAKE_CASE ) ) + list(_SCREAMING_SNAKE_CASE )
# remove ".weight" from the keys
snake_case_ = [""".weight""", """.bias"""]
snake_case_ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
snake_case_ = name.replace(_SCREAMING_SNAKE_CASE , """""" )
filtered_module_names.append(_SCREAMING_SNAKE_CASE )
return filtered_module_names
def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
for m in model.modules():
if isinstance(_SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ):
return True
return False
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
return next(parameter.parameters() ).device
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0 , dtype=_SCREAMING_SNAKE_CASE , value=_SCREAMING_SNAKE_CASE )
snake_case_ = param_name
snake_case_ = model
if "." in tensor_name:
snake_case_ = tensor_name.split(""".""" )
for split in splits[:-1]:
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if new_module is None:
raise ValueError(f"""{module} has no attribute {split}.""" )
snake_case_ = new_module
snake_case_ = splits[-1]
# offload weights
snake_case_ = False
offload_weight(module._parameters[tensor_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE , )
else:
offload_weight(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE )
offload_weight(_SCREAMING_SNAKE_CASE , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE )
set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """meta""" , dtype=_SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
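# --- Hedged example (added, not part of the original utilities) ---
# A minimal, self-contained sketch of the recursive nn.Linear replacement
# pattern implemented by the functions above. `DummyLinear` is a hypothetical
# stand-in for the bitsandbytes layer; only the traversal and weight copying
# are illustrated, not the quantization itself.
import torch.nn as nn

class DummyLinear(nn.Linear):
    """Hypothetical replacement layer, used only for illustration."""

def replace_linears(model, skip=()):
    for name, module in model.named_children():
        if isinstance(module, nn.Linear) and name not in skip:
            new_module = DummyLinear(module.in_features, module.out_features, module.bias is not None)
            new_module.weight.data = module.weight.data
            if module.bias is not None:
                new_module.bias.data = module.bias.data
            setattr(model, name, new_module)
        elif len(list(module.children())) > 0:
            replace_linears(module, skip)
    return model

# Usage sketch: replace_linears(nn.Sequential(nn.Linear(4, 4), nn.ReLU()))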
| 347 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = '▁'
UpperCAmelCase_ = {'vocab_file': 'sentencepiece.bpe.model'}
UpperCAmelCase_ = {
'vocab_file': {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
}
}
UpperCAmelCase_ = {
'facebook/xglm-564M': 2_0_4_8,
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase_ : Tuple = VOCAB_FILES_NAMES
lowerCAmelCase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : List[str] = ["""input_ids""", """attention_mask"""]
def __init__( self : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str="<s>" , _UpperCAmelCase : Optional[Any]="</s>" , _UpperCAmelCase : Optional[Any]="</s>" , _UpperCAmelCase : Optional[Any]="<s>" , _UpperCAmelCase : List[Any]="<unk>" , _UpperCAmelCase : Dict="<pad>" , _UpperCAmelCase : Optional[Dict[str, Any]] = None , **_UpperCAmelCase : Optional[int] , ):
"""simple docstring"""
UpperCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
UpperCAmelCase__ = 7
UpperCAmelCase__ = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
UpperCAmelCase__ = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCAmelCase ) )
UpperCAmelCase__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase__ = 1
# Mimic the fairseq token-to-id alignment for the first 4 tokens
UpperCAmelCase__ = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
UpperCAmelCase__ = len(self.sp_model )
UpperCAmelCase__ = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(_UpperCAmelCase )
UpperCAmelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : int ):
"""simple docstring"""
UpperCAmelCase__ = self.__dict__.copy()
UpperCAmelCase__ = None
UpperCAmelCase__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : str , _UpperCAmelCase : List[Any] ):
"""simple docstring"""
UpperCAmelCase__ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
UpperCAmelCase__ = {}
UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
UpperCAmelCase__ = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCAmelCase ))
return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase ))
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
UpperCAmelCase__ = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
UpperCAmelCase__ = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : str ):
"""simple docstring"""
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Optional[Any] ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase__ = self.sp_model.PieceToId(_UpperCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : List[Any] ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : Tuple ):
"""simple docstring"""
UpperCAmelCase__ = """""".join(_UpperCAmelCase ).replace(_UpperCAmelCase , """ """ ).strip()
return out_string
def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase__ = os.path.join(
_UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase , """wb""" ) as fi:
UpperCAmelCase__ = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (out_vocab_file,)
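# --- Hedged example (added for illustration only) ---
# A tiny sketch of the fairseq-offset id mapping implemented above: spm piece
# ids are shifted by `fairseq_offset` and the first four ids are pinned to the
# fairseq specials. The toy spm vocab below is an assumption, not a real model.
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1
toy_spm_vocab = {",": 3, ".": 4}  # spm id 0 is reserved for <unk>

def token_to_id(token):
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = toy_spm_vocab.get(token, 0)
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]

assert token_to_id(",") == 4  # matches the "," -> 4 row of the alignment table
assert token_to_id("<pad>") == 1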
| 61 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ = s.rsplit(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return new.join(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
UpperCAmelCase__ = {}
UpperCAmelCase__ = ["""group_1""", """group_2""", """group_3""", """group_4"""]
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
UpperCAmelCase__ = key.replace(F'''{group_key}.''' , F'''{group_key}.group.''' )
if "res_path" in key:
UpperCAmelCase__ = key.replace("""res_path.""" , """res_path.path.""" )
if key.endswith(""".w""" ):
UpperCAmelCase__ = rreplace(SCREAMING_SNAKE_CASE__ , """.w""" , """.weight""" , 1 )
if key.endswith(""".b""" ):
UpperCAmelCase__ = rreplace(SCREAMING_SNAKE_CASE__ , """.b""" , """.bias""" , 1 )
UpperCAmelCase__ = value.float()
return upgrade
@torch.no_grad()
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : List[Any]=True ):
'''simple docstring'''
from dall_e import Encoder
UpperCAmelCase__ = Encoder()
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase__ = torch.load(SCREAMING_SNAKE_CASE__ )
else:
UpperCAmelCase__ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase__ = ckpt.state_dict()
encoder.load_state_dict(SCREAMING_SNAKE_CASE__ )
if config_path is not None:
UpperCAmelCase__ = FlavaImageCodebookConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
else:
UpperCAmelCase__ = FlavaImageCodebookConfig()
UpperCAmelCase__ = FlavaImageCodebook(SCREAMING_SNAKE_CASE__ ).eval()
UpperCAmelCase__ = encoder.state_dict()
UpperCAmelCase__ = upgrade_state_dict(SCREAMING_SNAKE_CASE__ )
hf_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = hf_model.state_dict()
UpperCAmelCase__ = count_parameters(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = count_parameters(SCREAMING_SNAKE_CASE__ )
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(SCREAMING_SNAKE_CASE__ )
else:
return hf_state_dict
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
UpperCAmelCase_ = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
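# --- Hedged example (added for illustration only) ---
# The first helper above performs a right-to-left replacement via `rsplit`.
# A named, self-contained version with the same intended behavior:
def rreplace(s, old, new, occurrence):
    parts = s.rsplit(old, occurrence)
    return new.join(parts)

assert rreplace("blocks.0.conv.w", ".w", ".weight", 1) == "blocks.0.conv.weight"
assert rreplace("res_path.b", ".b", ".bias", 1) == "res_path.bias"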
| 61 | 1 |
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self :Dict , lowercase_ :str = "▁" , lowercase_ :bool = True , lowercase_ :Union[str, AddedToken] = "<unk>" , lowercase_ :Union[str, AddedToken] = "</s>" , lowercase_ :Union[str, AddedToken] = "<pad>" , ) -> str:
UpperCAmelCase = {
'pad': {'id': 0, 'token': pad_token},
'eos': {'id': 1, 'token': eos_token},
'unk': {'id': 2, 'token': unk_token},
}
UpperCAmelCase = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
UpperCAmelCase = token_dict['token']
UpperCAmelCase = Tokenizer(Unigram() )
UpperCAmelCase = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(' {2,}' ) , ' ' ),
normalizers.Lowercase(),
] )
UpperCAmelCase = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=lowercase_ , add_prefix_space=lowercase_ ),
pre_tokenizers.Digits(individual_digits=lowercase_ ),
pre_tokenizers.Punctuation(),
] )
UpperCAmelCase = decoders.Metaspace(replacement=lowercase_ , add_prefix_space=lowercase_ )
UpperCAmelCase = TemplateProcessing(
single=f"""$A {self.special_tokens['eos']['token']}""" , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
UpperCAmelCase = {
'model': 'SentencePieceUnigram',
'replacement': replacement,
'add_prefix_space': add_prefix_space,
}
super().__init__(lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Union[str, List[str]] , lowercase_ :int = 80_00 , lowercase_ :bool = True , ) -> Union[str, Any]:
UpperCAmelCase = trainers.UnigramTrainer(
vocab_size=lowercase_ , special_tokens=self.special_tokens_list , show_progress=lowercase_ , )
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase = [files]
self._tokenizer.train(lowercase_ , trainer=lowercase_ )
self.add_unk_id()
def UpperCAmelCase__ ( self :str , lowercase_ :Union[Iterator[str], Iterator[Iterator[str]]] , lowercase_ :int = 80_00 , lowercase_ :bool = True , ) -> Tuple:
UpperCAmelCase = trainers.UnigramTrainer(
vocab_size=lowercase_ , special_tokens=self.special_tokens_list , show_progress=lowercase_ , )
self._tokenizer.train_from_iterator(lowercase_ , trainer=lowercase_ )
self.add_unk_id()
def UpperCAmelCase__ ( self :Union[str, Any] ) -> int:
UpperCAmelCase = json.loads(self._tokenizer.to_str() )
UpperCAmelCase = self.special_tokens['unk']['id']
UpperCAmelCase = Tokenizer.from_str(json.dumps(lowercase_ ) )
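# --- Hedged example (added for illustration only) ---
# What the normalizer chain configured above does to raw text, using the same
# public `tokenizers` API as the imports at the top of this file.
from tokenizers import Regex, normalizers

demo_norm = normalizers.Sequence(
    [
        normalizers.Nmt(),
        normalizers.NFKC(),
        normalizers.Replace(Regex(" {2,}"), " "),
        normalizers.Lowercase(),
    ]
)
assert demo_norm.normalize_str("Hello   World") == "hello world"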
| 78 |
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def _lowerCAmelCase ( lowercase_ ):
UpperCAmelCase = fname.split(os.path.sep )[-1]
return re.search(R'^(.*)_\d+\.jpg$' , lowercase_ ).groups()[0]
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self :List[str] , lowercase_ :Dict , lowercase_ :List[str]=None , lowercase_ :Optional[Any]=None ) -> Optional[int]:
UpperCAmelCase = file_names
UpperCAmelCase = image_transform
UpperCAmelCase = label_to_id
def __len__( self :Optional[int] ) -> Optional[Any]:
return len(self.file_names )
def __getitem__( self :int , lowercase_ :str ) -> List[str]:
UpperCAmelCase = self.file_names[idx]
UpperCAmelCase = PIL.Image.open(lowercase_ )
UpperCAmelCase = raw_image.convert('RGB' )
if self.image_transform is not None:
UpperCAmelCase = self.image_transform(lowercase_ )
UpperCAmelCase = extract_label(lowercase_ )
if self.label_to_id is not None:
UpperCAmelCase = self.label_to_id[label]
return {"image": image, "label": label}
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
# Initialize accelerator
if args.with_tracking:
UpperCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase = config['lr']
UpperCAmelCase = int(config['num_epochs'] )
UpperCAmelCase = int(config['seed'] )
UpperCAmelCase = int(config['batch_size'] )
UpperCAmelCase = config['image_size']
if not isinstance(lowercase_ , (list, tuple) ):
UpperCAmelCase = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , 'isdigit' ):
if args.checkpointing_steps == "epoch":
UpperCAmelCase = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
UpperCAmelCase = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
UpperCAmelCase = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
UpperCAmelCase = os.path.split(lowercase_ )[-1].split('.' )[0]
accelerator.init_trackers(lowercase_ , lowercase_ )
# Grab all the image filenames
UpperCAmelCase = [os.path.join(args.data_dir , lowercase_ ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )]
# Build the label correspondences
UpperCAmelCase = [extract_label(lowercase_ ) for fname in file_names]
UpperCAmelCase = list(set(lowercase_ ) )
id_to_label.sort()
UpperCAmelCase = {lbl: i for i, lbl in enumerate(lowercase_ )}
# Set the seed before splitting the data.
np.random.seed(lowercase_ )
torch.manual_seed(lowercase_ )
torch.cuda.manual_seed_all(lowercase_ )
# Split our filenames between train and validation
UpperCAmelCase = np.random.permutation(len(lowercase_ ) )
UpperCAmelCase = int(0.8 * len(lowercase_ ) )
UpperCAmelCase = random_perm[:cut]
UpperCAmelCase = random_perm[cut:]
# For training we use a simple RandomResizedCrop
UpperCAmelCase = Compose([RandomResizedCrop(lowercase_ , scale=(0.5, 1.0) ), ToTensor()] )
UpperCAmelCase = PetsDataset(
[file_names[i] for i in train_split] , image_transform=lowercase_ , label_to_id=lowercase_ )
# For evaluation, we use a deterministic Resize
UpperCAmelCase = Compose([Resize(lowercase_ ), ToTensor()] )
UpperCAmelCase = PetsDataset([file_names[i] for i in eval_split] , image_transform=lowercase_ , label_to_id=lowercase_ )
# Instantiate dataloaders.
UpperCAmelCase = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
UpperCAmelCase = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase = create_model('resnet50d' , pretrained=lowercase_ , num_classes=len(lowercase_ ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
UpperCAmelCase = False
for param in model.get_classifier().parameters():
UpperCAmelCase = True
# We normalize the batches of images to be a bit faster.
UpperCAmelCase = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
UpperCAmelCase = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
UpperCAmelCase = OneCycleLR(optimizer=lowercase_ , max_lr=lowercase_ , epochs=lowercase_ , steps_per_epoch=len(lowercase_ ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase = 0
# We also need to keep track of the starting epoch so files are named properly
UpperCAmelCase = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
UpperCAmelCase = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
UpperCAmelCase = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
UpperCAmelCase = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
UpperCAmelCase = os.path.splitext(lowercase_ )[0]
if "epoch" in training_difference:
UpperCAmelCase = int(training_difference.replace('epoch_' , '' ) ) + 1
UpperCAmelCase = None
else:
UpperCAmelCase = int(training_difference.replace('step_' , '' ) )
UpperCAmelCase = resume_step // len(lowercase_ )
resume_step -= starting_epoch * len(lowercase_ )
# Now we train the model
for epoch in range(lowercase_ , lowercase_ ):
model.train()
if args.with_tracking:
UpperCAmelCase = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
UpperCAmelCase = accelerator.skip_first_batches(lowercase_ , lowercase_ )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
UpperCAmelCase = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
UpperCAmelCase = (batch['image'] - mean) / std
UpperCAmelCase = model(lowercase_ )
UpperCAmelCase = torch.nn.functional.cross_entropy(lowercase_ , batch['label'] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(lowercase_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
UpperCAmelCase = os.path.join(args.output_dir , lowercase_ )
accelerator.save_state(lowercase_ )
model.eval()
UpperCAmelCase = 0
UpperCAmelCase = 0
for step, batch in enumerate(lowercase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
UpperCAmelCase = (batch['image'] - mean) / std
with torch.no_grad():
UpperCAmelCase = model(lowercase_ )
UpperCAmelCase = outputs.argmax(dim=-1 )
UpperCAmelCase , UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['label']) )
UpperCAmelCase = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
UpperCAmelCase = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
'accuracy': 100 * eval_metric,
'train_loss': total_loss.item() / len(lowercase_ ),
'epoch': epoch,
} , step=lowercase_ , )
if checkpointing_steps == "epoch":
UpperCAmelCase = F"""epoch_{epoch}"""
if args.output_dir is not None:
UpperCAmelCase = os.path.join(args.output_dir , lowercase_ )
accelerator.save_state(lowercase_ )
if args.with_tracking:
accelerator.end_training()
def _lowerCAmelCase ( ):
UpperCAmelCase = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument('--data_dir' , required=lowercase_ , help='The data folder on disk.' )
parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' )
parser.add_argument(
'--mixed_precision' , type=lowercase_ , default=lowercase_ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose '
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--checkpointing_steps' , type=lowercase_ , default=lowercase_ , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , )
parser.add_argument(
'--output_dir' , type=lowercase_ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=lowercase_ , default=lowercase_ , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
'--project_dir' , type=lowercase_ , default='logs' , help='Location where to store experiment tracking logs and relevant project information' , )
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224}
training_function(lowercase_ , lowercase_ )
if __name__ == "__main__":
main()
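# --- Hedged example (added for illustration only) ---
# The resume logic above parses checkpoint folder names of the form
# `epoch_{i}` or `step_{i}`; a standalone sketch of that parsing:
import os

def parse_checkpoint_name(path):
    name = os.path.splitext(os.path.basename(path))[0]
    if "epoch" in name:
        return int(name.replace("epoch_", "")) + 1, None  # next epoch, no step offset
    return None, int(name.replace("step_", ""))  # raw step, converted by the caller

assert parse_checkpoint_name("out/epoch_4") == (5, None)
assert parse_checkpoint_name("out/step_120") == (None, 120)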
| 78 | 1 |
from __future__ import annotations
def _a ( SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
return [ord(SCREAMING_SNAKE_CASE ) - 96 for elem in plain]
def _a ( SCREAMING_SNAKE_CASE : list[int] ):
"""simple docstring"""
return "".join(chr(elem + 96 ) for elem in encoded )
def _a ( ):
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = encode(input('''-> ''' ).strip().lower() )
print('''Encoded: ''' , SCREAMING_SNAKE_CASE )
print('''Decoded:''' , decode(SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
main()
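# --- Hedged example (added for illustration only) ---
# Round trip of the a1z26-style mapping above: letters a..z map to 1..26
# because ord("a") == 97 and 97 - 96 == 1.
nums = [ord(ch) - 96 for ch in "hello"]
assert nums == [8, 5, 12, 12, 15]
assert "".join(chr(n + 96) for n in nums) == "hello"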
| 51 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
def _a ( SCREAMING_SNAKE_CASE : Union[tf.Tensor, np.ndarray] ):
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE , np.ndarray ):
return list(tensor.shape )
UpperCamelCase__ : List[Any] = tf.shape(SCREAMING_SNAKE_CASE )
if tensor.shape == tf.TensorShape(SCREAMING_SNAKE_CASE ):
return dynamic
UpperCamelCase__ : Optional[Any] = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(SCREAMING_SNAKE_CASE )]
def _a ( SCREAMING_SNAKE_CASE : tf.Tensor , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[str] = None ):
"""simple docstring"""
return tf.nn.softmax(logits=logits + 1E-9 , axis=SCREAMING_SNAKE_CASE , name=SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int=1E-5 , SCREAMING_SNAKE_CASE : Any=-1 ):
"""simple docstring"""
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
# Get mean and variance on the axis to be normalized
UpperCamelCase__ , UpperCamelCase__ : Dict = tf.nn.moments(SCREAMING_SNAKE_CASE , axes=[axis] , keepdims=SCREAMING_SNAKE_CASE )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
UpperCamelCase__ : Tuple = [1] * inputs.shape.rank
UpperCamelCase__ : List[str] = shape_list(SCREAMING_SNAKE_CASE )[axis]
UpperCamelCase__ : Union[str, Any] = tf.reshape(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = tf.reshape(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Compute layer normalization using the batch_normalization
# function.
UpperCamelCase__ : List[Any] = tf.nn.batch_normalization(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , offset=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , variance_epsilon=SCREAMING_SNAKE_CASE , )
return outputs
def _a ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Tuple=0 , SCREAMING_SNAKE_CASE : Tuple=-1 ):
"""simple docstring"""
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
UpperCamelCase__ : Union[str, Any] = tf.shape(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
UpperCamelCase__ : Tuple = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE : tf.Tensor ):
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE , tf.Tensor ):
UpperCamelCase__ : int = tf.convert_to_tensor(SCREAMING_SNAKE_CASE ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
UpperCamelCase__ : List[Any] = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
UpperCamelCase__ : Any = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
UpperCamelCase__ : Dict = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def _a ( SCREAMING_SNAKE_CASE : tf.Tensor , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str = "input_ids" ):
"""simple docstring"""
tf.debugging.assert_less(
SCREAMING_SNAKE_CASE , tf.cast(SCREAMING_SNAKE_CASE , dtype=tensor.dtype ) , message=(
F"The maximum value of {tensor_name} ({tf.math.reduce_max(SCREAMING_SNAKE_CASE )}) must be smaller than the embedding "
F"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
) , )
def _a ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ : str = 64512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
UpperCamelCase__ : List[str] = [x for x in data if len(SCREAMING_SNAKE_CASE ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'''The following attributes cannot be saved to HDF5 file because '''
F"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
F"bytes: {bad_attributes}" )
UpperCamelCase__ : Optional[Any] = np.asarray(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = 1
UpperCamelCase__ : Tuple = np.array_split(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
UpperCamelCase__ : Any = np.array_split(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ : Optional[int] = chunk_data
else:
UpperCamelCase__ : List[Any] = data
def _a ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
if name in group.attrs:
UpperCamelCase__ : List[Any] = [n.decode('''utf8''' ) if hasattr(SCREAMING_SNAKE_CASE , '''decode''' ) else n for n in group.attrs[name]]
else:
UpperCamelCase__ : Optional[int] = []
UpperCamelCase__ : Union[str, Any] = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('''utf8''' ) if hasattr(SCREAMING_SNAKE_CASE , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
chunk_id += 1
return data
def _a ( SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
def _expand_single_ad_tensor(SCREAMING_SNAKE_CASE : str ):
if isinstance(SCREAMING_SNAKE_CASE , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(SCREAMING_SNAKE_CASE , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , SCREAMING_SNAKE_CASE )
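# --- Hedged example (added for illustration only) ---
# The static/dynamic shape trick used by shape_list above: keep dimensions
# known at graph-construction time as Python ints and fall back to the
# runtime tf.shape tensor for any that are None.
demo_x = tf.zeros([2, 3, 8])
demo_static = demo_x.shape.as_list()  # entries may be None in graph mode
demo_dynamic = tf.shape(demo_x)
demo_mixed = [demo_dynamic[i] if s is None else s for i, s in enumerate(demo_static)]
assert demo_mixed == [2, 3, 8]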
| 51 | 1 |
def lowerCamelCase__ ( __lowerCamelCase : int ):
if num < 0:
return False
__UpperCAmelCase : Optional[int] = num
__UpperCAmelCase : Union[str, Any] = 0
while num > 0:
__UpperCAmelCase : Optional[int] = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
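# Worked trace of the loop above for num = 121: rev_num goes 1 -> 12 -> 121
# while num goes 12 -> 1 -> 0, so num_copy == rev_num and the function
# returns True; for 123 the reversal is 321 and it returns False.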
| 114 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :int , lowerCamelCase :AutoencoderKL , lowerCamelCase :CLIPTextModel , lowerCamelCase :CLIPTokenizer , lowerCamelCase :UNetaDConditionModel , lowerCamelCase :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCamelCase :StableDiffusionSafetyChecker , lowerCamelCase :CLIPImageProcessor , ) -> Optional[int]:
super().__init__()
self.register_modules(
vae=lowerCamelCase , text_encoder=lowerCamelCase , tokenizer=lowerCamelCase , unet=lowerCamelCase , scheduler=lowerCamelCase , safety_checker=lowerCamelCase , feature_extractor=lowerCamelCase , )
def UpperCAmelCase_ ( self :Tuple , lowerCamelCase :Optional[Union[str, int]] = "auto" ) -> int:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase )
def UpperCAmelCase_ ( self :Optional[int] ) -> Union[str, Any]:
self.enable_attention_slicing(lowerCamelCase )
@torch.no_grad()
def __call__( self :int , lowerCamelCase :Union[str, List[str]] , lowerCamelCase :int = 512 , lowerCamelCase :int = 512 , lowerCamelCase :int = 50 , lowerCamelCase :float = 7.5 , lowerCamelCase :Optional[Union[str, List[str]]] = None , lowerCamelCase :Optional[int] = 1 , lowerCamelCase :float = 0.0 , lowerCamelCase :Optional[torch.Generator] = None , lowerCamelCase :Optional[torch.FloatTensor] = None , lowerCamelCase :Optional[str] = "pil" , lowerCamelCase :bool = True , lowerCamelCase :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase :int = 1 , lowerCamelCase :Optional[torch.FloatTensor] = None , **lowerCamelCase :List[str] , ) -> str:
if isinstance(lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = 1
elif isinstance(lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = len(lowerCamelCase )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCamelCase , lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(lowerCamelCase )}.''' )
# get prompt text embeddings
UpperCAmelCase__ = self.tokenizer(
lowerCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
UpperCAmelCase__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCAmelCase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
UpperCAmelCase__ = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
UpperCAmelCase__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = text_embeddings.shape
UpperCAmelCase__ = text_embeddings.repeat(1 , lowerCamelCase , 1 )
UpperCAmelCase__ = text_embeddings.view(bs_embed * num_images_per_prompt , lowerCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCAmelCase__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCAmelCase__ = 42
if negative_prompt is None:
UpperCAmelCase__ = [""]
elif type(lowerCamelCase ) is not type(lowerCamelCase ):
raise TypeError(
f'''`negative_prompt` should be the same type as `prompt`, but got {type(lowerCamelCase )} !='''
f''' {type(lowerCamelCase )}.''' )
elif isinstance(lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = [negative_prompt]
elif batch_size != len(lowerCamelCase ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that the passed `negative_prompt` matches'''
" the batch size of `prompt`." )
else:
UpperCAmelCase__ = negative_prompt
UpperCAmelCase__ = text_input_ids.shape[-1]
UpperCAmelCase__ = self.tokenizer(
lowerCamelCase , padding="max_length" , max_length=lowerCamelCase , truncation=lowerCamelCase , return_tensors="pt" , )
UpperCAmelCase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase__ = uncond_embeddings.shape[1]
UpperCAmelCase__ = uncond_embeddings.repeat(lowerCamelCase , lowerCamelCase , 1 )
UpperCAmelCase__ = uncond_embeddings.view(batch_size * num_images_per_prompt , lowerCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated on the target device
# for 1-to-1 reproducibility of results with the CompVis implementation.
# However, this currently doesn't work on `mps`.
UpperCAmelCase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCAmelCase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
UpperCAmelCase__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCAmelCase__ = torch.randn(
lowerCamelCase , generator=lowerCamelCase , device="cpu" , dtype=lowerCamelCase ).to(self.device )
UpperCAmelCase__ = torch.randn(lowerCamelCase , generator=lowerCamelCase , device="cpu" , dtype=lowerCamelCase ).to(
self.device )
else:
UpperCAmelCase__ = torch.randn(
lowerCamelCase , generator=lowerCamelCase , device=self.device , dtype=lowerCamelCase )
UpperCAmelCase__ = torch.randn(lowerCamelCase , generator=lowerCamelCase , device=self.device , dtype=lowerCamelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
UpperCAmelCase__ = latents_reference.to(self.device )
UpperCAmelCase__ = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
UpperCAmelCase__ = (latents_shape[3] - latents_shape_reference[3]) // 2
UpperCAmelCase__ = (latents_shape[2] - latents_shape_reference[2]) // 2
UpperCAmelCase__ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
UpperCAmelCase__ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
UpperCAmelCase__ = 0 if dx < 0 else dx
UpperCAmelCase__ = 0 if dy < 0 else dy
UpperCAmelCase__ = max(-dx , 0 )
UpperCAmelCase__ = max(-dy , 0 )
UpperCAmelCase__ = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(lowerCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCAmelCase__ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCAmelCase__ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase__ = {}
if accepts_eta:
UpperCAmelCase__ = eta
for i, t in enumerate(self.progress_bar(lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase__ = self.scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
# predict the noise residual
UpperCAmelCase__ = self.unet(lowerCamelCase , lowerCamelCase , encoder_hidden_states=lowerCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCAmelCase__ , UpperCAmelCase__ = noise_pred.chunk(2 )
UpperCAmelCase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase__ = self.scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = 1 / 0.1_82_15 * latents
UpperCAmelCase__ = self.vae.decode(lowerCamelCase ).sample
UpperCAmelCase__ = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCAmelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
UpperCAmelCase__ = self.feature_extractor(self.numpy_to_pil(lowerCamelCase ) , return_tensors="pt" ).to(
self.device )
UpperCAmelCase__ , UpperCAmelCase__ = self.safety_checker(
images=lowerCamelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
UpperCAmelCase__ = None
if output_type == "pil":
UpperCAmelCase__ = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=lowerCamelCase , nsfw_content_detected=lowerCamelCase )
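# --- Hedged example (added for illustration only) ---
# The classifier-free guidance combination applied in the denoising loop
# above: the guided prediction moves from the unconditional estimate toward
# the text-conditional one, scaled by guidance_scale.
demo_uncond = torch.zeros(1, 4, 8, 8)
demo_text = torch.ones(1, 4, 8, 8)
demo_guided = demo_uncond + 7.5 * (demo_text - demo_uncond)
assert torch.allclose(demo_guided, torch.full_like(demo_guided, 7.5))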
| 169 | 0 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowercase : List[str] = logging.get_logger(__name__)
class lowerCamelCase__ ( __lowercase):
'''simple docstring'''
_A = ['audio_values', 'audio_mask']
def __init__( self :Dict , a :Any=2_0_4_8 , a :str=1 , a :Optional[Any]=[1_6, 1_6] , a :str=1_2_8 , a :int=4_4_1_0_0 , a :Tuple=8_6 , a :Optional[Any]=2_0_4_8 , a :List[Any]=0.0 , **a :Tuple , ) -> Tuple:
super().__init__(
feature_size=a , sampling_rate=a , padding_value=a , **a , )
__UpperCamelCase : List[str] = spectrogram_length
__UpperCamelCase : Dict = num_channels
__UpperCamelCase : Tuple = patch_size
__UpperCamelCase : List[str] = feature_size // self.patch_size[1]
__UpperCamelCase : str = n_fft
__UpperCamelCase : List[Any] = sampling_rate // hop_length_to_sampling_rate
__UpperCamelCase : List[str] = sampling_rate
__UpperCamelCase : Optional[int] = padding_value
__UpperCamelCase : Optional[Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=a , norm="slaney" , mel_scale="slaney" , ).T
def _lowerCamelCase ( self :Optional[Any] , a :np.array ) -> np.ndarray:
__UpperCamelCase : List[str] = spectrogram(
a , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=80.0 , )
__UpperCamelCase : int = log_spec[:, :-1]
__UpperCamelCase : Optional[int] = log_spec - 20.0
__UpperCamelCase : str = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self :List[Any] , a :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a :Optional[Union[str, TensorType]] = None , a :Optional[bool] = True , a :Optional[int] = None , a :bool = False , a :bool = False , **a :List[str] , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
f' with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
__UpperCamelCase : Optional[Any] = isinstance(a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
__UpperCamelCase : str = is_batched_numpy or (
isinstance(a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__UpperCamelCase : str = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(a , np.ndarray ):
__UpperCamelCase : Any = np.asarray(a , dtype=np.floataa )
elif isinstance(a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__UpperCamelCase : List[str] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__UpperCamelCase : Union[str, Any] = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
__UpperCamelCase : Tuple = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , a ):
__UpperCamelCase : Union[str, Any] = [np.asarray(a , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
__UpperCamelCase : Dict = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
__UpperCamelCase : Optional[Any] = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
__UpperCamelCase : str = np.array(a ).astype(np.floataa )
# convert into correct format for padding
__UpperCamelCase : Optional[Any] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
__UpperCamelCase : List[Any] = np.ones([len(a ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
__UpperCamelCase : List[str] = padded_audio_features * self.padding_value
for i in range(len(a ) ):
__UpperCamelCase : int = audio_features[i]
__UpperCamelCase : Tuple = feature
# return as BatchFeature
if return_attention_mask:
__UpperCamelCase : Dict = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
else:
__UpperCamelCase : Dict = {"audio_values": padded_audio_features}
__UpperCamelCase : Union[str, Any] = BatchFeature(data=a , tensor_type=a )
return encoded_inputs
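# --- Hedged example (added for illustration only) ---
# The patch-count arithmetic behind the audio attention mask above: a
# spectrogram with T time frames and patch height P contributes
# ceil(T / P) * freq_len attended patches. The numbers are illustrative.
demo_T, demo_P, demo_freq_len = 100, 16, 8
demo_attended = ceil(demo_T / demo_P) * demo_freq_len
assert demo_attended == 7 * 8 == 56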
| 362 |
import qiskit
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 2) -> qiskit.result.counts.Counts:
'''simple docstring'''
__UpperCamelCase : List[str] = qubits
# Using Aer's simulator
__UpperCamelCase : int = qiskit.Aer.get_backend("aer_simulator")
# Creating a Quantum Circuit acting on the q register
__UpperCamelCase : List[str] = qiskit.QuantumCircuit(_lowerCamelCase , _lowerCamelCase)
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0)
for i in range(1 , _lowerCamelCase):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , _lowerCamelCase)
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(_lowerCamelCase)) , list(range(_lowerCamelCase)))
# Measuring any one qubit now collapses the superposition of the other
# qubits, leaving them in the same state as the measured one.
# Executing the circuit on the simulator
__UpperCamelCase : Any = qiskit.execute(_lowerCamelCase , _lowerCamelCase , shots=1_000)
return job.result().get_counts(_lowerCamelCase)
if __name__ == "__main__":
print(f"Total count for various states are: {quantum_entanglement(3)}")
| 151 | 0 |
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
if hor == 128:
snake_case_ = ('''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''')
snake_case_ = (32, 128, 256)
snake_case_ = ('''UpResnetBlock1D''', '''UpResnetBlock1D''')
elif hor == 32:
snake_case_ = ('''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''')
snake_case_ = (32, 64, 128, 256)
snake_case_ = ('''UpResnetBlock1D''', '''UpResnetBlock1D''', '''UpResnetBlock1D''')
snake_case_ = torch.load(F'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
snake_case_ = model.state_dict()
snake_case_ = {
'''down_block_types''': down_block_types,
'''block_out_channels''': block_out_channels,
'''up_block_types''': up_block_types,
'''layers_per_block''': 1,
'''use_timestep_embedding''': True,
'''out_block_type''': '''OutConv1DBlock''',
'''norm_num_groups''': 8,
'''downsample_each_block''': False,
'''in_channels''': 14,
'''out_channels''': 14,
'''extra_in_channels''': 0,
'''time_embedding_type''': '''positional''',
'''flip_sin_to_cos''': False,
'''freq_shift''': 1,
'''sample_size''': 65536,
'''mid_block_type''': '''MidResTemporalBlock1D''',
'''act_fn''': '''mish''',
}
snake_case_ = UNetaDModel(**SCREAMING_SNAKE_CASE__ )
print(F'''length of state dict: {len(state_dict.keys() )}''' )
print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
snake_case_ = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
snake_case_ = state_dict.pop(SCREAMING_SNAKE_CASE__ )
hf_value_function.load_state_dict(SCREAMING_SNAKE_CASE__ )
torch.save(hf_value_function.state_dict() , F'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(F'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , '''w''' ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __SCREAMING_SNAKE_CASE ():
snake_case_ = {
'''in_channels''': 14,
'''down_block_types''': ('''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D'''),
'''up_block_types''': (),
'''out_block_type''': '''ValueFunction''',
'''mid_block_type''': '''ValueFunctionMidBlock1D''',
'''block_out_channels''': (32, 64, 128, 256),
'''layers_per_block''': 1,
'''downsample_each_block''': True,
'''sample_size''': 65536,
'''out_channels''': 14,
'''extra_in_channels''': 0,
'''time_embedding_type''': '''positional''',
'''use_timestep_embedding''': True,
'''flip_sin_to_cos''': False,
'''freq_shift''': 1,
'''norm_num_groups''': 8,
'''act_fn''': '''mish''',
}
snake_case_ = torch.load('''/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch''' )
snake_case_ = model
snake_case_ = UNetaDModel(**SCREAMING_SNAKE_CASE__ )
print(F'''length of state dict: {len(state_dict.keys() )}''' )
print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
snake_case_ = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
snake_case_ = state_dict.pop(SCREAMING_SNAKE_CASE__ )
hf_value_function.load_state_dict(SCREAMING_SNAKE_CASE__ )
torch.save(hf_value_function.state_dict() , '''hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin''' )
with open('''hub/hopper-medium-v2/value_function/config.json''' , '''w''' ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 8 |
from collections import deque
from .hash_table import HashTable
class snake_case_ ( __A ):
'''simple docstring'''
def __init__( self : int , *_UpperCamelCase : int , **_UpperCamelCase : Tuple ) ->Tuple:
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
def snake_case__( self : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : Dict ) ->Tuple:
snake_case_ = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(_UpperCamelCase )
snake_case_ = self.values[key]
def snake_case__( self : List[Any] ) ->str:
return (
sum(self.charge_factor - len(_UpperCamelCase ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def snake_case__( self : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int]=None ) ->str:
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(_UpperCamelCase ) == 0
):
return key
return super()._collision_resolution(_UpperCamelCase , _UpperCamelCase )
| 8 | 1 |
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
lowercase : Optional[Any] = get_logger(__name__)
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=0 ):
'''simple docstring'''
os.makedirs(snake_case__ , exist_ok=snake_case__ )
with FSDP.state_dict_type(
snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
A : str = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
A : List[str] = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin'
A : Any = os.path.join(snake_case__ , snake_case__ )
if accelerator.process_index == 0:
logger.info(F'Saving model to {output_model_file}' )
torch.save(snake_case__ , snake_case__ )
logger.info(F'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
A : List[str] = (
F'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
A : List[Any] = os.path.join(snake_case__ , snake_case__ )
logger.info(F'Saving model to {output_model_file}' )
torch.save(snake_case__ , snake_case__ )
logger.info(F'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
A : int = os.path.join(snake_case__ , F'{MODEL_NAME}_{model_index}' )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
logger.info(F'Saving model to {ckpt_dir}' )
A : Optional[int] = {'''model''': state_dict}
dist_cp.save_state_dict(
state_dict=snake_case__ , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , )
logger.info(F'Model saved to {ckpt_dir}' )
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(snake_case__ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'''Set the `sync_module_states` flag to `True` so that model states are synced across processes when '''
'''initializing FSDP object''' )
return
A : str = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin'
A : List[str] = os.path.join(snake_case__ , snake_case__ )
logger.info(F'Loading model from {input_model_file}' )
A : List[Any] = torch.load(snake_case__ )
logger.info(F'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
A : Any = (
F'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
A : int = os.path.join(snake_case__ , snake_case__ )
logger.info(F'Loading model from {input_model_file}' )
A : Optional[int] = torch.load(snake_case__ )
logger.info(F'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
A : Optional[int] = (
os.path.join(snake_case__ , F'{MODEL_NAME}_{model_index}' )
if F'{MODEL_NAME}' not in input_dir
else input_dir
)
logger.info(F'Loading model from {ckpt_dir}' )
A : Optional[int] = {'''model''': model.state_dict()}
dist_cp.load_state_dict(
state_dict=snake_case__ , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , planner=DefaultLoadPlanner() , )
A : Union[str, Any] = state_dict['''model''']
logger.info(F'Model loaded from {ckpt_dir}' )
model.load_state_dict(snake_case__ )
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=0 ):
'''simple docstring'''
os.makedirs(snake_case__ , exist_ok=snake_case__ )
with FSDP.state_dict_type(
snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
A : Any = FSDP.optim_state_dict(snake_case__ , snake_case__ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
A : Optional[Any] = (
F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
A : Dict = os.path.join(snake_case__ , snake_case__ )
logger.info(F'Saving Optimizer state to {output_optimizer_file}' )
torch.save(snake_case__ , snake_case__ )
logger.info(F'Optimizer state saved in {output_optimizer_file}' )
else:
A : Any = os.path.join(snake_case__ , F'{OPTIMIZER_NAME}_{optimizer_index}' )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
logger.info(F'Saving Optimizer state to {ckpt_dir}' )
dist_cp.save_state_dict(
state_dict={'''optimizer''': optim_state} , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , )
logger.info(F'Optimizer state saved in {ckpt_dir}' )
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
A : Tuple = None
# below check should work but currently it isn't working (mostly opytorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
A : Optional[Any] = (
F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
A : Optional[int] = os.path.join(snake_case__ , snake_case__ )
logger.info(F'Loading Optimizer state from {input_optimizer_file}' )
A : List[Any] = torch.load(snake_case__ )
logger.info(F'Optimizer state loaded from {input_optimizer_file}' )
else:
A : Dict = (
os.path.join(snake_case__ , F'{OPTIMIZER_NAME}_{optimizer_index}' )
if F'{OPTIMIZER_NAME}' not in input_dir
else input_dir
)
logger.info(F'Loading Optimizer from {ckpt_dir}' )
A : str = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key='''optimizer''' , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , )
A : Optional[int] = optim_state['''optimizer''']
logger.info(F'Optimizer loaded from {ckpt_dir}' )
A : List[Any] = FSDP.optim_state_dict_to_load(snake_case__ , snake_case__ , snake_case__ )
optimizer.load_state_dict(snake_case__ )
| 311 |
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class A :
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=50 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , ) -> str:
"""simple docstring"""
A : Any = parent
A : List[Any] = batch_size
A : Union[str, Any] = seq_length
A : Any = is_training
A : int = use_input_mask
A : Union[str, Any] = vocab_size
A : List[Any] = hidden_size
A : List[Any] = num_hidden_layers
A : Optional[int] = num_attention_heads
A : str = intermediate_size
A : Tuple = hidden_act
A : Union[str, Any] = hidden_dropout_prob
A : Union[str, Any] = attention_probs_dropout_prob
A : int = max_position_embeddings
A : Optional[int] = initializer_range
A : Any = use_labels
A : Optional[int] = scope
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A : Optional[int] = None
if self.use_input_mask:
A : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A : Dict = self.get_config()
return config, input_ids, input_mask, token_labels
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
(
(
A
), (
A
), (
A
), (
A
),
) : Any = self.prepare_config_and_inputs()
A : Tuple = True
A : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
A : List[str] = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : List[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )
A : int = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
"""simple docstring"""
A : List[str] = True
A : Union[str, Any] = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : str = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , )
A : List[Any] = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
A : Optional[Any] = True
A : Tuple = True
A : Optional[int] = BertGenerationDecoder(config=SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ).eval()
# first forward pass
A : str = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE , )
A : Optional[int] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
A : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
A : Dict = torch.cat([input_ids, next_tokens] , dim=-1 )
A : List[str] = torch.cat([input_mask, next_mask] , dim=-1 )
A : str = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0]
A : Any = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0]
# select random slice
A : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
A : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
A : Optional[Any] = BertGenerationDecoder(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : Optional[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A, A, A, A : Optional[int] = self.prepare_config_and_inputs()
A : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class A ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
__magic_name__ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
__magic_name__ = (BertGenerationDecoder,) if is_torch_available() else ()
__magic_name__ = (
{'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder}
if is_torch_available()
else {}
)
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : List[str] = BertGenerationEncoderTester(self )
A : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A, A, A, A : Tuple = self.model_tester.prepare_config_and_inputs()
A : str = '''bert'''
self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A : int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
(
(
A
), (
A
), (
A
), (
A
), (
A
), (
A
),
) : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
A : Union[str, Any] = None
self.model_tester.create_and_check_model_as_decoder(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*SCREAMING_SNAKE_CASE )
@slow
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : Optional[Any] = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
@require_torch
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : Tuple = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
A : Optional[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
A : Dict = model(SCREAMING_SNAKE_CASE )[0]
A : Optional[Any] = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
A : Dict = torch.tensor(
[[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
@require_torch
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Optional[Any] = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
A : Dict = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
A : Optional[Any] = model(SCREAMING_SNAKE_CASE )[0]
A : Optional[Any] = torch.Size([1, 8, 50358] )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
A : Any = torch.tensor(
[[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 311 | 1 |
"""simple docstring"""
def __a ( __lowerCamelCase, __lowerCamelCase ):
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"""{price_plus_tax(100, 0.25) = }""")
print(f"""{price_plus_tax(125.50, 0.05) = }""")
| 61 |
"""simple docstring"""
from __future__ import annotations
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 61 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class a ( SCREAMING_SNAKE_CASE_ ):
snake_case__ = '''gpt_neo'''
snake_case__ = ['''past_key_values''']
snake_case__ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self , _snake_case=5_02_57 , _snake_case=20_48 , _snake_case=20_48 , _snake_case=24 , _snake_case=[[["global", "local"], 12]] , _snake_case=16 , _snake_case=None , _snake_case=2_56 , _snake_case="gelu_new" , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.1 , _snake_case=1E-5 , _snake_case=0.02 , _snake_case=True , _snake_case=5_02_56 , _snake_case=5_02_56 , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = vocab_size
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = hidden_size
lowerCAmelCase = num_layers
lowerCAmelCase = num_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = window_size
lowerCAmelCase = activation_function
lowerCAmelCase = resid_dropout
lowerCAmelCase = embed_dropout
lowerCAmelCase = attention_dropout
lowerCAmelCase = classifier_dropout
lowerCAmelCase = layer_norm_epsilon
lowerCAmelCase = initializer_range
lowerCAmelCase = use_cache
lowerCAmelCase = bos_token_id
lowerCAmelCase = eos_token_id
lowerCAmelCase = attention_types
lowerCAmelCase = self.expand_attention_types_params(__a )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.attention_layers)` == `config.num_layers` '
F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, '
F'`config.num_layers = {self.num_layers}`. '
'`config.attention_layers` is prepared using `config.attention_types`. '
'Please verify the value of `config.attention_types` argument.' )
super().__init__(bos_token_id=__a , eos_token_id=__a , **__a )
@staticmethod
def UpperCamelCase__ ( _snake_case ):
"""simple docstring"""
lowerCAmelCase = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] ):
import torch
lowerCAmelCase = input.size()
lowerCAmelCase = len(_UpperCAmelCase )
lowerCAmelCase = shape[dimension]
lowerCAmelCase = torch.arange(0 , _UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = torch.div(sizedim - size , _UpperCAmelCase , rounding_mode='floor' ) + 1
lowerCAmelCase = torch.arange(_UpperCAmelCase ) + low_indices[:min_length][:, None]
lowerCAmelCase = [slice(_UpperCAmelCase )] * rank
lowerCAmelCase = indices
lowerCAmelCase = input[s]
lowerCAmelCase = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any ):
import torch
lowerCAmelCase = torch.arange(1 , _UpperCAmelCase )
lowerCAmelCase = torch.remainder(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = remainders == 0
lowerCAmelCase = candidates[divisor_indices]
lowerCAmelCase = torch.max(_UpperCAmelCase )
return largest_divisor, torch.div(_UpperCAmelCase , _UpperCAmelCase , rounding_mode='floor' )
class a ( SCREAMING_SNAKE_CASE_ ):
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(__a , direction='inputs' )
lowerCAmelCase = {0: 'batch', 1: 'past_sequence + sequence'}
else:
lowerCAmelCase = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self._config.num_heads
def UpperCamelCase__ ( self , _snake_case , _snake_case = -1 , _snake_case = -1 , _snake_case = False , _snake_case = None , ):
"""simple docstring"""
lowerCAmelCase = super(__a , self ).generate_dummy_inputs(
__a , batch_size=__a , seq_length=__a , is_pair=__a , framework=__a )
# We need to order the input in the way they appears in the forward()
lowerCAmelCase = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
lowerCAmelCase = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
lowerCAmelCase = seqlen + 2
lowerCAmelCase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCAmelCase = [
(torch.zeros(__a ), torch.zeros(__a )) for _ in range(self.num_layers )
]
lowerCAmelCase = common_inputs['attention_mask']
if self.use_past:
lowerCAmelCase = ordered_inputs['attention_mask'].dtype
lowerCAmelCase = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(__a , __a , dtype=__a )] , dim=1 )
return ordered_inputs
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 13
| 350 |
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] | None = None ):
lowerCAmelCase = word_bank or []
# create a table
lowerCAmelCase = len(_UpperCAmelCase ) + 1
lowerCAmelCase = []
for _ in range(_UpperCAmelCase ):
table.append([] )
# seed value
lowerCAmelCase = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(_UpperCAmelCase ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(_UpperCAmelCase )] == word:
lowerCAmelCase = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(_UpperCAmelCase )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(_UpperCAmelCase )]:
combination.reverse()
return table[len(_UpperCAmelCase )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
| 309 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case_ : Optional[int] = logging.get_logger(__name__)
snake_case_ : Union[str, Any] = {"tokenizer_file": "tokenizer.json"}
snake_case_ : Tuple = {
"tokenizer_file": {
"bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
},
}
class __snake_case ( a ):
UpperCAmelCase__ : Any = VOCAB_FILES_NAMES
UpperCAmelCase__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Tuple = ['''input_ids''', '''attention_mask''']
UpperCAmelCase__ : List[str] = None
def __init__( self : int , _snake_case : List[Any]=None , _snake_case : Dict=None , _snake_case : str=None , _snake_case : int="<unk>" , _snake_case : Dict="<s>" , _snake_case : List[str]="</s>" , _snake_case : List[str]="<pad>" , _snake_case : Optional[Any]=False , _snake_case : int=False , **_snake_case : Dict , ):
"""simple docstring"""
super().__init__(
_snake_case , _snake_case , tokenizer_file=_snake_case , unk_token=_snake_case , bos_token=_snake_case , eos_token=_snake_case , pad_token=_snake_case , add_prefix_space=_snake_case , clean_up_tokenization_spaces=_snake_case , **_snake_case , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('''add_prefix_space''' , _snake_case) != add_prefix_space:
UpperCAmelCase_ = getattr(_snake_case , pre_tok_state.pop('''type'''))
UpperCAmelCase_ = add_prefix_space
UpperCAmelCase_ = pre_tok_class(**_snake_case)
UpperCAmelCase_ = add_prefix_space
def lowerCamelCase ( self : Optional[Any] , *_snake_case : Optional[int] , **_snake_case : Tuple):
"""simple docstring"""
UpperCAmelCase_ = kwargs.get('''is_split_into_words''' , _snake_case)
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
''' pretokenized inputs.''')
return super()._batch_encode_plus(*_snake_case , **_snake_case)
def lowerCamelCase ( self : int , *_snake_case : str , **_snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = kwargs.get('''is_split_into_words''' , _snake_case)
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
''' pretokenized inputs.''')
return super()._encode_plus(*_snake_case , **_snake_case)
def lowerCamelCase ( self : Tuple , _snake_case : str , _snake_case : Optional[str] = None):
"""simple docstring"""
UpperCAmelCase_ = self._tokenizer.model.save(_snake_case , name=_snake_case)
return tuple(_snake_case)
def lowerCamelCase ( self : Optional[Any] , _snake_case : "Conversation"):
"""simple docstring"""
UpperCAmelCase_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_snake_case , add_special_tokens=_snake_case) + [self.eos_token_id])
if len(_snake_case) > self.model_max_length:
UpperCAmelCase_ = input_ids[-self.model_max_length :]
return input_ids
| 51 |
def A (__A : list , __A : int , __A : int = 0 , __A : int = 0 ) -> int:
"""simple docstring"""
UpperCAmelCase_ = right or len(__A ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(__A , __A , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 51 | 1 |
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
__A = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
__A = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
__A = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ) -> List[Any]:
if return_pvalue:
_lowerCAmelCase =pearsonr(__UpperCAmelCase , __UpperCAmelCase )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(__UpperCAmelCase , __UpperCAmelCase )[0] )}
| 341 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 1 |
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ : Optional[int] = logging.get_logger(__name__)
lowerCAmelCase_ : str = {'vocab_file': 'vocab.txt'}
lowerCAmelCase_ : Dict = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
lowerCAmelCase_ : List[str] = {
'openbmb/cpm-ant-10b': 10_24,
}
def _lowerCamelCase ( lowercase : Any ) -> List[str]:
_a = collections.OrderedDict()
with open(UpperCAmelCase_ , "r" , encoding="utf-8" ) as reader:
_a = reader.readlines()
for index, token in enumerate(UpperCAmelCase_ ):
_a = token.rstrip("\n" )
_a = index
return vocab
class __SCREAMING_SNAKE_CASE (_snake_case ):
"""simple docstring"""
def __init__( self : Tuple , __a : List[Any] , __a : Optional[Any]="<unk>" , __a : Optional[int]=2_00 ):
_a = vocab
_a = unk_token
_a = max_input_chars_per_word
def UpperCamelCase__ ( self : Dict , __a : List[str] ):
_a = list(lowercase_ )
if len(lowercase_ ) > self.max_input_chars_per_word:
return [self.unk_token]
_a = 0
_a = []
while start < len(lowercase_ ):
_a = len(lowercase_ )
_a = None
while start < end:
_a = ''.join(chars[start:end] )
if substr in self.vocab:
_a = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(lowercase_ )
_a = end
return sub_tokens
class __SCREAMING_SNAKE_CASE (_snake_case ):
"""simple docstring"""
__a =VOCAB_FILES_NAMES
__a =PRETRAINED_VOCAB_FILES_MAP
__a =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a =["""input_ids""", """attention_mask"""]
__a =False
def __init__( self : Any , __a : Union[str, Any] , __a : Union[str, Any]="<d>" , __a : int="</d>" , __a : Optional[int]="<s>" , __a : List[Any]="</s>" , __a : Dict="<pad>" , __a : Dict="<unk>" , __a : Any="</n>" , __a : List[Any]="</_>" , __a : Dict="left" , **__a : List[Any] , ):
requires_backends(self , ["jieba"] )
super().__init__(
bod_token=lowercase_ , eod_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , pad_token=lowercase_ , unk_token=lowercase_ , line_token=lowercase_ , space_token=lowercase_ , padding_side=lowercase_ , **lowercase_ , )
_a = bod_token
_a = eod_token
_a = load_vocab(lowercase_ )
_a = self.encoder[space_token]
_a = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
_a = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __a : x[1] ) )
_a = {v: k for k, v in self.encoder.items()}
_a = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def UpperCamelCase__ ( self : str ):
return self.encoder[self.bod_token]
@property
def UpperCamelCase__ ( self : Any ):
return self.encoder[self.eod_token]
@property
def UpperCamelCase__ ( self : Tuple ):
return self.encoder["\n"]
@property
def UpperCamelCase__ ( self : str ):
return len(self.encoder )
def UpperCamelCase__ ( self : Optional[Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase__ ( self : List[str] , __a : List[str] ):
_a = []
for x in jieba.cut(lowercase_ , cut_all=lowercase_ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowercase_ ) )
return output_tokens
def UpperCamelCase__ ( self : Tuple , __a : Tuple , **__a : str ):
_a = [i for i in token_ids if i >= 0]
_a = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(lowercase_ , **lowercase_ )
def UpperCamelCase__ ( self : str , __a : Union[str, Any] ):
return token in self.encoder
def UpperCamelCase__ ( self : List[str] , __a : List[str] ):
return "".join(lowercase_ )
def UpperCamelCase__ ( self : Dict , __a : List[Any] ):
return self.encoder.get(lowercase_ , self.encoder.get(self.unk_token ) )
def UpperCamelCase__ ( self : str , __a : Tuple ):
return self.decoder.get(lowercase_ , self.unk_token )
def UpperCamelCase__ ( self : Dict , __a : str , __a : Optional[str] = None ):
if os.path.isdir(lowercase_ ):
_a = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
_a = (filename_prefix + '-' if filename_prefix else '') + save_directory
_a = 0
if " " in self.encoder:
_a = self.encoder[' ']
del self.encoder[" "]
if "\n" in self.encoder:
_a = self.encoder['\n']
del self.encoder["\n"]
_a = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __a : x[1] ) )
with open(lowercase_ , "w" , encoding="utf-8" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
" Please check that the vocabulary is not corrupted!" )
_a = token_index
writer.write(token + "\n" )
index += 1
return (vocab_file,)
def UpperCamelCase__ ( self : Dict , __a : List[int] , __a : List[int] = None ):
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def UpperCamelCase__ ( self : Optional[Any] , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ )
if token_ids_a is not None:
return [1] + ([0] * len(lowercase_ )) + [1] + ([0] * len(lowercase_ ))
return [1] + ([0] * len(lowercase_ ))
| 63 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class A_ :
'''simple docstring'''
def __init__( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any]=2 , lowercase_ : str=True , lowercase_ : Optional[int]=False , lowercase_ : List[str]=10 , lowercase_ : Optional[Any]=3 , lowercase_ : List[str]=32 * 4 , lowercase_ : str=32 * 6 , lowercase_ : List[Any]=4 , lowercase_ : List[Any]=32 , ) -> Optional[int]:
UpperCAmelCase : List[str] = parent
UpperCAmelCase : int = batch_size
UpperCAmelCase : int = is_training
UpperCAmelCase : int = use_auxiliary_loss
UpperCAmelCase : List[Any] = num_queries
UpperCAmelCase : List[str] = num_channels
UpperCAmelCase : List[str] = min_size
UpperCAmelCase : Dict = max_size
UpperCAmelCase : Tuple = num_labels
UpperCAmelCase : str = mask_feature_size
def UpperCAmelCase_ ( self : int ) -> int:
UpperCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowercase_ )
UpperCAmelCase : Tuple = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowercase_ )
UpperCAmelCase : str = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowercase_ ) > 0.5
).float()
UpperCAmelCase : Optional[Any] = (torch.rand((self.batch_size, self.num_labels) , device=lowercase_ ) > 0.5).long()
UpperCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def UpperCAmelCase_ ( self : Dict ) -> Dict:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = self.prepare_config_and_inputs()
UpperCAmelCase : Optional[Any] = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Tuple ) -> int:
UpperCAmelCase : int = output.encoder_hidden_states
UpperCAmelCase : Any = output.pixel_decoder_hidden_states
UpperCAmelCase : int = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowercase_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowercase_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowercase_ ) , config.decoder_config.decoder_layers )
def UpperCAmelCase_ ( self : List[str] , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : Dict=False ) -> Tuple:
with torch.no_grad():
UpperCAmelCase : str = MaskFormerModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase : List[str] = model(pixel_values=lowercase_ , pixel_mask=lowercase_ )
UpperCAmelCase : Union[str, Any] = model(lowercase_ , output_hidden_states=lowercase_ )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Dict , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : str ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = MaskFormerForInstanceSegmentation(config=lowercase_ )
model.to(lowercase_ )
model.eval()
def comm_check_on_output(lowercase_ : Union[str, Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCAmelCase : Optional[Any] = model(pixel_values=lowercase_ , pixel_mask=lowercase_ )
UpperCAmelCase : Dict = model(lowercase_ )
comm_check_on_output(lowercase_ )
UpperCAmelCase : Any = model(
pixel_values=lowercase_ , pixel_mask=lowercase_ , mask_labels=lowercase_ , class_labels=lowercase_ )
comm_check_on_output(lowercase_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class A_ ( _snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : str = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
UpperCAmelCase_ : Optional[Any] = (
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
UpperCAmelCase_ : int = False
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : List[str] = False
UpperCAmelCase_ : Tuple = False
def UpperCAmelCase_ ( self : Any ) -> int:
UpperCAmelCase : Optional[Any] = MaskFormerModelTester(self )
UpperCAmelCase : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
def UpperCAmelCase_ ( self : str ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowercase_ , **lowercase_ , output_hidden_states=lowercase_ )
def UpperCAmelCase_ ( self : Any ) -> Any:
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*lowercase_ )
@unittest.skip(reason='MaskFormer does not use inputs_embeds' )
def UpperCAmelCase_ ( self : Optional[int] ) -> str:
pass
@unittest.skip(reason='MaskFormer does not have a get_input_embeddings method' )
def UpperCAmelCase_ ( self : str ) -> List[str]:
pass
@unittest.skip(reason='MaskFormer is not a generative model' )
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
pass
@unittest.skip(reason='MaskFormer does not use token embeddings' )
def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCAmelCase_ ( self : int ) -> List[Any]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self : Dict ) -> List[Any]:
UpperCAmelCase , UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Tuple = model_class(lowercase_ )
UpperCAmelCase : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Optional[Any] = [*signature.parameters.keys()]
UpperCAmelCase : str = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_ )
@slow
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
for model_name in ["facebook/maskformer-swin-small-coco"]:
UpperCAmelCase : Tuple = MaskFormerModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
UpperCAmelCase : Optional[Any] = (self.model_tester.min_size,) * 2
UpperCAmelCase : str = {
'pixel_values': torch.randn((2, 3, *size) , device=lowercase_ ),
'mask_labels': torch.randn((2, 10, *size) , device=lowercase_ ),
'class_labels': torch.zeros(2 , 10 , device=lowercase_ ).long(),
}
UpperCAmelCase : List[str] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(lowercase_ )
UpperCAmelCase : Optional[int] = model(**lowercase_ )
self.assertTrue(outputs.loss is not None )
def UpperCAmelCase_ ( self : Dict ) -> str:
UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowercase_ , **lowercase_ , output_hidden_states=lowercase_ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[Any] = model_class(lowercase_ ).to(lowercase_ )
UpperCAmelCase : List[Any] = model(**lowercase_ , output_attentions=lowercase_ )
self.assertTrue(outputs.attentions is not None )
def UpperCAmelCase_ ( self : Dict ) -> str:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
UpperCAmelCase : Dict = self.all_model_classes[1]
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase : Any = model_class(lowercase_ )
model.to(lowercase_ )
model.train()
UpperCAmelCase : Tuple = model(lowercase_ , mask_labels=lowercase_ , class_labels=lowercase_ ).loss
loss.backward()
def UpperCAmelCase_ ( self : List[str] ) -> str:
# only MaskFormerForInstanceSegmentation has the loss
UpperCAmelCase : Optional[int] = self.all_model_classes[1]
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase : List[str] = True
UpperCAmelCase : Optional[Any] = True
UpperCAmelCase : List[Any] = model_class(lowercase_ )
model.to(lowercase_ )
model.train()
UpperCAmelCase : List[str] = model(lowercase_ , mask_labels=lowercase_ , class_labels=lowercase_ )
UpperCAmelCase : Tuple = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCAmelCase : Optional[int] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
UpperCAmelCase : Any = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCAmelCase : Tuple = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowercase_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowercase__ = 1e-4
def UpperCamelCase( ):
UpperCAmelCase : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class A_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
return (
MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco' )
if is_vision_available()
else None
)
def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
UpperCAmelCase : List[Any] = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco' ).to(lowercase_ )
UpperCAmelCase : Dict = self.default_image_processor
UpperCAmelCase : List[str] = prepare_img()
UpperCAmelCase : Union[str, Any] = image_processor(lowercase_ , return_tensors='pt' ).to(lowercase_ )
UpperCAmelCase : Optional[Any] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowercase_ , (1, 3, 800, 1_088) )
with torch.no_grad():
UpperCAmelCase : List[Any] = model(**lowercase_ )
UpperCAmelCase : str = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(lowercase_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowercase_ , atol=lowercase_ ) )
UpperCAmelCase : Tuple = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(lowercase_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowercase_ , atol=lowercase_ ) )
UpperCAmelCase : Tuple = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(lowercase_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowercase_ , atol=lowercase_ ) )
def UpperCAmelCase_ ( self : List[str] ) -> int:
UpperCAmelCase : Optional[int] = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(lowercase_ )
.eval()
)
UpperCAmelCase : int = self.default_image_processor
UpperCAmelCase : Any = prepare_img()
UpperCAmelCase : List[Any] = image_processor(lowercase_ , return_tensors='pt' ).to(lowercase_ )
UpperCAmelCase : Union[str, Any] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowercase_ , (1, 3, 800, 1_088) )
with torch.no_grad():
UpperCAmelCase : Tuple = model(**lowercase_ )
# masks_queries_logits
UpperCAmelCase : Tuple = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
UpperCAmelCase : Optional[int] = [
[-1.373_7124, -1.772_4937, -1.936_4233],
[-1.597_7281, -1.986_7939, -2.152_3695],
[-1.579_5398, -1.926_9832, -2.09_3942],
]
UpperCAmelCase : str = torch.tensor(lowercase_ ).to(lowercase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowercase_ , atol=lowercase_ ) )
# class_queries_logits
UpperCAmelCase : Tuple = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCAmelCase : Optional[Any] = torch.tensor(
[
[1.6512E00, -5.2572E00, -3.3519E00],
[3.6169E-02, -5.9025E00, -2.9313E00],
[1.0766E-04, -7.7630E00, -5.1263E00],
] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowercase_ , atol=lowercase_ ) )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
UpperCAmelCase : str = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff' )
.to(lowercase_ )
.eval()
)
UpperCAmelCase : str = self.default_image_processor
UpperCAmelCase : str = prepare_img()
UpperCAmelCase : Union[str, Any] = image_processor(lowercase_ , return_tensors='pt' ).to(lowercase_ )
UpperCAmelCase : str = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowercase_ , (1, 3, 800, 1_088) )
with torch.no_grad():
UpperCAmelCase : Tuple = model(**lowercase_ )
# masks_queries_logits
UpperCAmelCase : int = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
UpperCAmelCase : int = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
UpperCAmelCase : str = torch.tensor(lowercase_ ).to(lowercase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowercase_ , atol=lowercase_ ) )
# class_queries_logits
UpperCAmelCase : Union[str, Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCAmelCase : Dict = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowercase_ , atol=lowercase_ ) )
def UpperCAmelCase_ ( self : Any ) -> Dict:
UpperCAmelCase : Any = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(lowercase_ )
.eval()
)
UpperCAmelCase : Union[str, Any] = self.default_image_processor
UpperCAmelCase : Optional[int] = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
UpperCAmelCase : Optional[int] = inputs['pixel_values'].to(lowercase_ )
UpperCAmelCase : Optional[Any] = [el.to(lowercase_ ) for el in inputs['mask_labels']]
UpperCAmelCase : List[str] = [el.to(lowercase_ ) for el in inputs['class_labels']]
with torch.no_grad():
UpperCAmelCase : Tuple = model(**lowercase_ )
self.assertTrue(outputs.loss is not None )
| 151 | 0 |
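The test above leans on a PyTorch detail worth spelling out: `.grad` is only populated on leaf tensors by default, which is why each intermediate activation gets a `retain_grad()` call before `backward()`. A minimal sketch of that behavior, separate from the model code:
import torch

x = torch.ones(2, requires_grad=True)   # leaf tensor
h = x * 3                               # non-leaf intermediate
h.retain_grad()                         # without this, h.grad stays None after backward()
h.sum().backward()
assert x.grad is not None and h.grad is not None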
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__UpperCAmelCase = {
'''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 42 |
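The `__init__.py` above routes every public name through `_LazyModule`, so importing the package does not pull in torch, TF, or flax until a symbol is actually used. A hedged sketch of the idea using plain PEP 562 `__getattr__` in a package `__init__.py` (the submodule map is illustrative, not the real `_LazyModule` internals):
import importlib

_SUBMODULE_FOR = {"ResNetModel": ".modeling_resnet", "ResNetConfig": ".configuration_resnet"}

def __getattr__(name):
    # Import the heavy submodule only on first attribute access.
    if name in _SUBMODULE_FOR:
        module = importlib.import_module(_SUBMODULE_FOR[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")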
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class lowerCamelCase__ ( _a , _a ):
@register_to_config
def __init__( self : str , _a : int = 7_6_8 , ):
super().__init__()
a__: Optional[Any] =nn.Parameter(torch.zeros(1 , _a ) )
a__: List[str] =nn.Parameter(torch.ones(1 , _a ) )
def _lowerCamelCase ( self : Tuple , _a : Optional[Union[str, torch.device]] = None , _a : Optional[torch.dtype] = None , ):
a__: str =nn.Parameter(self.mean.to(_a ).to(_a ) )
a__: List[Any] =nn.Parameter(self.std.to(_a ).to(_a ) )
return self
def _lowerCamelCase ( self : List[Any] , _a : Dict ):
a__: str =(embeds - self.mean) * 1.0 / self.std
return embeds
def _lowerCamelCase ( self : List[Any] , _a : str ):
a__: Optional[Any] =(embeds * self.std) + self.mean
return embeds
| 42 | 1 |
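The module above keeps a learned per-dimension mean and std and exposes two passes that are exact inverses of each other. A readable round-trip sketch, with stand-in names for the obfuscated ones:
import torch
from torch import nn

class EmbedNormalizer(nn.Module):
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def scale(self, embeds):    # (x - mean) / std
        return (embeds - self.mean) / self.std

    def unscale(self, embeds):  # x * std + mean
        return embeds * self.std + self.mean

norm = EmbedNormalizer(4)
x = torch.randn(2, 4)
assert torch.allclose(norm.unscale(norm.scale(x)), x, atol=1e-6)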
'''simple docstring'''
import re
def lowercase ( __magic_name__ ):
'''simple docstring'''
return [char.split() for char in re.split(R"[^ a-z A-Z 0-9 \s]" , str_ )]
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = split_input(str_ )
return "".join(
["".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
try:
UpperCAmelCase : Optional[int] = split_input(__magic_name__ )
if upper:
UpperCAmelCase : str = "".join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
UpperCAmelCase : Union[str, Any] = "".join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def lowercase ( __magic_name__ ):
'''simple docstring'''
return to_simple_case(__magic_name__ )
def lowercase ( __magic_name__ ):
'''simple docstring'''
try:
UpperCAmelCase : List[str] = to_simple_case(__magic_name__ )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
return to_complex_case(__magic_name__ , __magic_name__ , "_" )
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
return to_complex_case(__magic_name__ , __magic_name__ , "-" )
if __name__ == "__main__":
__import__("doctest").testmod()
| 311 |
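All of the converters above start from the same regex split (words on whitespace, punctuation as a hard separator) and differ only in how they re-join the pieces. A compact sketch with readable names for the obfuscated helpers:
import re

def split_input(text):
    return [chunk.split() for chunk in re.split(r"[^ a-zA-Z0-9\s]", text)]

def to_pascal_case(text):
    return "".join(word.capitalize() for chunk in split_input(text) for word in chunk)

def to_snake_case(text):
    return "".join("_".join(w.lower() for w in chunk) for chunk in split_input(text))

print(to_pascal_case("one two three"))  # OneTwoThree
print(to_snake_case("one two three"))   # one_two_three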
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def lowercase ( __magic_name__ ):
'''simple docstring'''
for param in module.parameters():
UpperCAmelCase : Any = False
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : int = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
UpperCAmelCase : int = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : str = plt.imshow(__magic_name__ )
fig.axes.get_xaxis().set_visible(__magic_name__ )
fig.axes.get_yaxis().set_visible(__magic_name__ )
plt.show()
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : str = datetime.now()
UpperCAmelCase : Tuple = current_time.strftime("%H:%M:%S" )
return timestamp
| 311 | 1 |
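The freeze helper above flips `requires_grad` off for every parameter of a module, which removes it from backpropagation entirely. The same operation in isolation:
import torch
from torch import nn

layer = nn.Linear(4, 2)
for p in layer.parameters():
    p.requires_grad = False  # what the helper does per parameter
assert sum(p.numel() for p in layer.parameters() if p.requires_grad) == 0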
"""simple docstring"""
def a__ ( snake_case__ ) -> Dict:
    # if the collection is empty, return an empty list
if collection == []:
return []
# get some information about the collection
lowerCamelCase = len(snake_case__ )
lowerCamelCase = max(snake_case__ )
lowerCamelCase = min(snake_case__ )
# create the counting array
lowerCamelCase = coll_max + 1 - coll_min
lowerCamelCase = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i the collection has
for i in range(1 , snake_case__ ):
lowerCamelCase = counting_arr[i] + counting_arr[i - 1]
# create the output collection
lowerCamelCase = [0] * coll_len
    # place the elements in the output, walking the input from end to beginning
    # so equal keys keep their original order (stable sort), updating counting_arr
for i in reversed(range(0 , snake_case__ ) ):
lowerCamelCase = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def a__ ( snake_case__ ) -> Optional[int]:
return "".join([chr(snake_case__ ) for i in counting_sort([ord(snake_case__ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt"
lowerCAmelCase : str = input("""Enter numbers separated by a comma:\n""").strip()
lowerCAmelCase : Tuple = [int(item) for item in user_input.split(""",""")]
print(counting_sort(unsorted))
| 168 |
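A micro-trace of the counting pass above may help (a sketch, not the dataset's code): on [3, 1, 2, 1], coll_min is 1, the raw counts are [2, 1, 1], and the prefix sums become [2, 3, 4]; walking the input from the end while decrementing the prefix sums places equal keys in their original order, which is exactly what makes the sort stable.
data = [3, 1, 2, 1]
counts = [0, 0, 0]                 # one slot per value in [1, 3]
for n in data:
    counts[n - 1] += 1             # raw counts [2, 1, 1]
for i in range(1, 3):
    counts[i] += counts[i - 1]     # prefix sums [2, 3, 4]
out = [0] * len(data)
for n in reversed(data):
    counts[n - 1] -= 1
    out[counts[n - 1]] = n
assert out == [1, 1, 2, 3]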
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
lowerCAmelCase : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
"""simple docstring"""
super().__init__()
if safety_checker is None:
logger.warning(
f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
speech_model=_a , speech_processor=_a , vae=_a , text_encoder=_a , tokenizer=_a , unet=_a , scheduler=_a , feature_extractor=_a , )
def _lowerCAmelCase ( self , _a = "auto" ):
"""simple docstring"""
if slice_size == "auto":
lowerCamelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
self.enable_attention_slicing(_a )
@torch.no_grad()
def __call__( self , _a , _a=16_000 , _a = 512 , _a = 512 , _a = 50 , _a = 7.5 , _a = None , _a = 1 , _a = 0.0 , _a = None , _a = None , _a = "pil" , _a = True , _a = None , _a = 1 , **_a , ):
"""simple docstring"""
lowerCamelCase = self.speech_processor.feature_extractor(
_a , return_tensors="""pt""" , sampling_rate=_a ).input_features.to(self.device )
lowerCamelCase = self.speech_model.generate(_a , max_length=480_000 )
lowerCamelCase = self.speech_processor.tokenizer.batch_decode(_a , skip_special_tokens=_a , normalize=_a )[
0
]
if isinstance(_a , _a ):
lowerCamelCase = 1
elif isinstance(_a , _a ):
lowerCamelCase = len(_a )
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(_a )}' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_a , _a ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(_a )}.' )
# get prompt text embeddings
lowerCamelCase = self.tokenizer(
_a , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
lowerCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
lowerCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
lowerCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowerCamelCase , lowerCamelCase , lowerCamelCase = text_embeddings.shape
lowerCamelCase = text_embeddings.repeat(1 , _a , 1 )
lowerCamelCase = text_embeddings.view(bs_embed * num_images_per_prompt , _a , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCamelCase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase = 42
if negative_prompt is None:
lowerCamelCase = [""""""] * batch_size
elif type(_a ) is not type(_a ):
raise TypeError(
f'`negative_prompt` should be the same type to `prompt`, but got {type(_a )} !='
f' {type(_a )}.' )
elif isinstance(_a , _a ):
lowerCamelCase = [negative_prompt]
elif batch_size != len(_a ):
raise ValueError(
f'`negative_prompt`: {negative_prompt} has batch size {len(_a )}, but `prompt`:'
f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
""" the batch size of `prompt`.""" )
else:
lowerCamelCase = negative_prompt
lowerCamelCase = text_input_ids.shape[-1]
lowerCamelCase = self.tokenizer(
_a , padding="""max_length""" , max_length=_a , truncation=_a , return_tensors="""pt""" , )
lowerCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCamelCase = uncond_embeddings.shape[1]
lowerCamelCase = uncond_embeddings.repeat(1 , _a , 1 )
lowerCamelCase = uncond_embeddings.view(batch_size * num_images_per_prompt , _a , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCamelCase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowerCamelCase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCamelCase = torch.randn(_a , generator=_a , device="""cpu""" , dtype=_a ).to(
self.device )
else:
lowerCamelCase = torch.randn(_a , generator=_a , device=self.device , dtype=_a )
else:
if latents.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
lowerCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_a )
# Some schedulers like PNDM have timesteps as arrays
        # It's more efficient to move all timesteps to the correct device beforehand
lowerCamelCase = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
        # eta corresponds to η in the DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase = {}
if accepts_eta:
lowerCamelCase = eta
for i, t in enumerate(self.progress_bar(_a ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
lowerCamelCase = self.unet(_a , _a , encoder_hidden_states=_a ).sample
# perform guidance
if do_classifier_free_guidance:
lowerCamelCase , lowerCamelCase = noise_pred.chunk(2 )
lowerCamelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase = self.scheduler.step(_a , _a , _a , **_a ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_a , _a , _a )
lowerCamelCase = 1 / 0.18_215 * latents
lowerCamelCase = self.vae.decode(_a ).sample
lowerCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCamelCase = self.numpy_to_pil(_a )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=_a , nsfw_content_detected=_a )
| 168 | 1 |
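The guidance step buried in the pipeline above is the standard classifier-free-guidance update: extrapolate from the unconditional prediction toward the text-conditioned one. Isolated sketch, with random tensors standing in for the two UNet outputs:
import torch

noise_pred_uncond = torch.randn(1, 4, 8, 8)
noise_pred_text = torch.randn(1, 4, 8, 8)
guidance_scale = 7.5  # values > 1.0 strengthen the text conditioning
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)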
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def _UpperCAmelCase ( _lowerCamelCase : Callable , _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ) -> np.ndarray:
_lowerCAmelCase : Union[str, Any] = int(np.ceil((x_end - xa) / step_size ) )
_lowerCAmelCase : Tuple = np.zeros((n + 1,) )
_lowerCAmelCase : List[Any] = ya
_lowerCAmelCase : int = xa
for k in range(_lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = y[k] + step_size * ode_func(_lowerCamelCase , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 309 |
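A hedged usage sketch of the explicit Euler integrator above, on the test ODE y' = y with y(0) = 1 (exact solution e^x); `explicit_euler` is a readable stand-in for the obfuscated function name:
import numpy as np

def explicit_euler(ode_func, y0, x0, step_size, x_end):
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros(n + 1)
    y[0], x = y0, x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y

y = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
print(abs(y[-1] - np.e))  # global error is O(step_size), roughly 1e-2 here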
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309 | 1 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_SCREAMING_SNAKE_CASE = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 165 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
__lowerCAmelCase = StableDiffusionXLImgaImgPipeline
__lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
__lowerCAmelCase = PipelineTesterMixin.required_optional_params - {"""latents"""}
__lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__lowerCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
__lowerCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase_ , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
UpperCamelCase = EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
torch.manual_seed(0 )
UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=32 , )
UpperCamelCase = CLIPTextModel(lowerCamelCase_ )
UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=lowerCamelCase_ )
UpperCamelCase = CLIPTextModelWithProjection(lowerCamelCase_ )
UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=lowerCamelCase_ )
UpperCamelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def lowerCamelCase_ ( self : str , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[int]=0 ):
"""simple docstring"""
UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
UpperCamelCase = image / 2 + 0.5
if str(lowerCamelCase_ ).startswith("""mps""" ):
UpperCamelCase = torch.manual_seed(lowerCamelCase_ )
else:
UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
UpperCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.7_5,
}
return inputs
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = StableDiffusionXLImgaImgPipeline(**lowerCamelCase_ )
UpperCamelCase = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_ )
UpperCamelCase = sd_pipe(**lowerCamelCase_ ).images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCamelCase = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
pass
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = StableDiffusionXLImgaImgPipeline(**lowerCamelCase_ )
UpperCamelCase = sd_pipe.to(lowerCamelCase_ )
UpperCamelCase = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
# forward without prompt embeds
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_ )
UpperCamelCase = 3 * ["""this is a negative prompt"""]
UpperCamelCase = negative_prompt
UpperCamelCase = 3 * [inputs["""prompt"""]]
UpperCamelCase = sd_pipe(**lowerCamelCase_ )
UpperCamelCase = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_ )
UpperCamelCase = 3 * ["""this is a negative prompt"""]
UpperCamelCase = 3 * [inputs.pop("""prompt""" )]
        UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = sd_pipe.encode_prompt(lowerCamelCase_ , negative_prompt=lowerCamelCase_ )
UpperCamelCase = sd_pipe(
**lowerCamelCase_ , prompt_embeds=lowerCamelCase_ , negative_prompt_embeds=lowerCamelCase_ , pooled_prompt_embeds=lowerCamelCase_ , negative_pooled_prompt_embeds=lowerCamelCase_ , )
UpperCamelCase = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : str , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict="cpu" , lowerCamelCase_ : List[str]=torch.floataa , lowerCamelCase_ : Tuple=0 ):
"""simple docstring"""
UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
UpperCamelCase = np.random.RandomState(lowerCamelCase_ ).standard_normal((1, 4, 64, 64) )
UpperCamelCase = torch.from_numpy(lowerCamelCase_ ).to(device=lowerCamelCase_ , dtype=lowerCamelCase_ )
UpperCamelCase = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = self.get_inputs(lowerCamelCase_ )
UpperCamelCase = pipe(**lowerCamelCase_ ).images
UpperCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCamelCase = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 165 | 1 |
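The reproducibility these tests depend on comes from per-call seeded generators: two `torch.Generator`s seeded identically produce identical draws, so a pipeline fed the same generator yields the same image. Sketch:
import torch

g1 = torch.Generator(device="cpu").manual_seed(0)
g2 = torch.Generator(device="cpu").manual_seed(0)
assert torch.equal(torch.randn(2, 2, generator=g1), torch.randn(2, 2, generator=g2))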
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
__lowerCAmelCase = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
__lowerCAmelCase = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
__lowerCAmelCase = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def lowercase (self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> List[str]:
if return_pvalue:
_snake_case = pearsonr(UpperCAmelCase , UpperCAmelCase )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(UpperCAmelCase , UpperCAmelCase )[0] )}
| 341 |
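The metric's `compute` is a thin wrapper over `scipy.stats.pearsonr`; the worked numbers in the docstring can be reproduced directly (values taken from the example above):
from scipy.stats import pearsonr

r, p = pearsonr([10, 9, 2.5, 6, 4], [1, 2, 3, 4, 5])
print(round(r, 2), round(p, 2))  # -0.74 0.15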
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
__lowerCAmelCase = TypeVar('T')
__lowerCAmelCase = Union[List[T], Tuple[T, ...]]
__lowerCAmelCase = Union[T, List[T], Dict[str, T]]
__lowerCAmelCase = Union[str, bytes, os.PathLike]
| 341 | 1 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
snake_case = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
snake_case = {
"""allenai/longformer-base-4096""": 4_096,
"""allenai/longformer-large-4096""": 4_096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4_096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4_096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowerCamelCase__ ( ):
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
UpperCamelCase__ : List[str] = bs[:]
UpperCamelCase__ : List[str] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowercase )
cs.append(2**8 + n )
n += 1
UpperCamelCase__ : int = [chr(lowercase ) for n in cs]
return dict(zip(lowercase , lowercase ) )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
UpperCamelCase__ : List[str] = set()
UpperCamelCase__ : List[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCamelCase__ : Dict = char
return pairs
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Any = VOCAB_FILES_NAMES
UpperCamelCase_ : Any = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : List[str] = ['''input_ids''', '''attention_mask''']
def __init__( self : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int="replace" , UpperCAmelCase_ : str="<s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Dict="</s>" , UpperCAmelCase_ : List[str]="<s>" , UpperCAmelCase_ : Tuple="<unk>" , UpperCAmelCase_ : Optional[int]="<pad>" , UpperCAmelCase_ : Union[str, Any]="<mask>" , UpperCAmelCase_ : List[str]=False , **UpperCAmelCase_ : Any , ):
UpperCamelCase__ : Union[str, Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else bos_token
UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else eos_token
UpperCamelCase__ : List[str] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else sep_token
UpperCamelCase__ : List[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else cls_token
UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else unk_token
UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
UpperCamelCase__ : Optional[int] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else mask_token
super().__init__(
errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , **UpperCAmelCase_ , )
with open(UpperCAmelCase_ , encoding="utf-8" ) as vocab_handle:
UpperCamelCase__ : str = json.load(UpperCAmelCase_ )
UpperCamelCase__ : int = {v: k for k, v in self.encoder.items()}
UpperCamelCase__ : int = errors # how to handle errors in decoding
UpperCamelCase__ : Any = bytes_to_unicode()
UpperCamelCase__ : Optional[int] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCAmelCase_ , encoding="utf-8" ) as merges_handle:
UpperCamelCase__ : Tuple = merges_handle.read().split("\n" )[1:-1]
UpperCamelCase__ : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
UpperCamelCase__ : Union[str, Any] = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
UpperCamelCase__ : Any = {}
UpperCamelCase__ : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCamelCase__ : Any = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def _A ( self : Optional[Any] ):
return len(self.encoder )
def _A ( self : Optional[int] ):
return dict(self.encoder , **self.added_tokens_encoder )
def _A ( self : Union[str, Any] , UpperCAmelCase_ : Optional[int] ):
if token in self.cache:
return self.cache[token]
UpperCamelCase__ : Optional[Any] = tuple(UpperCAmelCase_ )
UpperCamelCase__ : Tuple = get_pairs(UpperCAmelCase_ )
if not pairs:
return token
while True:
UpperCamelCase__ : Tuple = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : self.bpe_ranks.get(UpperCAmelCase_ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCamelCase__ : Tuple = bigram
UpperCamelCase__ : int = []
UpperCamelCase__ : List[Any] = 0
while i < len(UpperCAmelCase_ ):
try:
UpperCamelCase__ : Dict = word.index(UpperCAmelCase_ , UpperCAmelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCamelCase__ : Optional[int] = j
if word[i] == first and i < len(UpperCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCamelCase__ : Union[str, Any] = tuple(UpperCAmelCase_ )
UpperCamelCase__ : Optional[int] = new_word
if len(UpperCAmelCase_ ) == 1:
break
else:
UpperCamelCase__ : Union[str, Any] = get_pairs(UpperCAmelCase_ )
UpperCamelCase__ : str = " ".join(UpperCAmelCase_ )
UpperCamelCase__ : Optional[int] = word
return word
def _A ( self : Dict , UpperCAmelCase_ : Any ):
UpperCamelCase__ : List[Any] = []
for token in re.findall(self.pat , UpperCAmelCase_ ):
UpperCamelCase__ : Optional[int] = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase_ ).split(" " ) )
return bpe_tokens
def _A ( self : str , UpperCAmelCase_ : List[str] ):
return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token ) )
def _A ( self : Optional[int] , UpperCAmelCase_ : List[str] ):
return self.decoder.get(UpperCAmelCase_ )
def _A ( self : Any , UpperCAmelCase_ : str ):
UpperCamelCase__ : Tuple = "".join(UpperCAmelCase_ )
UpperCamelCase__ : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _A ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ):
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase__ : Dict = os.path.join(
UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase__ : Optional[int] = os.path.join(
UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_ ) + "\n" )
UpperCamelCase__ : Any = 0
with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase_ : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
UpperCamelCase__ : Dict = token_index
writer.write(" ".join(UpperCAmelCase_ ) + "\n" )
index += 1
return vocab_file, merge_file
def _A ( self : Optional[Any] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase__ : Tuple = [self.cls_token_id]
UpperCamelCase__ : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _A ( self : Union[str, Any] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase_ )) + [1]
return [1] + ([0] * len(UpperCAmelCase_ )) + [1, 1] + ([0] * len(UpperCAmelCase_ )) + [1]
def _A ( self : Optional[Any] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ):
UpperCamelCase__ : Dict = [self.sep_token_id]
UpperCamelCase__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _A ( self : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any]=False , **UpperCAmelCase_ : List[str] ):
UpperCamelCase__ : Any = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase_ ) > 0 and not text[0].isspace()):
UpperCamelCase__ : int = " " + text
return (text, kwargs)
| 359 |
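Two helpers carry most of the BPE machinery above: `bytes_to_unicode` gives every byte a printable stand-in, and the pair extractor feeds the merge loop. A tiny readable sketch of the pair step (the dataset's names are obfuscated):
def get_pairs(word):
    # word is a tuple of symbols, e.g. ("l", "o", "w", "e", "r")
    return {(a, b) for a, b in zip(word, word[1:])}

print(sorted(get_pairs(("l", "o", "w", "e", "r"))))
# [('e', 'r'), ('l', 'o'), ('o', 'w'), ('w', 'e')] -- the merge loop repeatedly
# fuses the pair with the lowest merge rank from this set into one symbol.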
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
snake_case = """3"""
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 319 | 0 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
lowercase : Optional[Any] = logging.get_logger(__name__)
class __UpperCAmelCase ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_=None , **lowerCAmelCase_ ):
"""simple docstring"""
warnings.warn(
'`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '
'instead.' , lowerCAmelCase_ , )
super().__init__(args=lowerCAmelCase_ , **lowerCAmelCase_ )
| 42 |
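The wrapper above is the stock deprecation pattern: warn at construction, then defer everything to the parent class. The same shape in isolation (the warning category is an assumption; the dataset obfuscates it):
import warnings

class NewTrainer:
    def __init__(self, args=None, **kwargs):
        self.args = args

class OldTrainer(NewTrainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn("`OldTrainer` is deprecated; use `NewTrainer` instead.", FutureWarning)
        super().__init__(args=args, **kwargs)

OldTrainer()  # emits a FutureWarning, otherwise behaves like NewTrainer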
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
lowercase : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
lowercase : list[int] = [ord(letter) for letter in string.ascii_lowercase]
lowercase : set[int] = {ord(char) for char in VALID_CHARS}
lowercase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> str | None:
_snake_case = ""
_snake_case = 42
_snake_case = 42
_snake_case = 42
for keychar, cipherchar in zip(cycle(__A ) , __A ):
_snake_case = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(__A )
return decoded
def SCREAMING_SNAKE_CASE__ ( __A ) -> list[str]:
_snake_case = []
for key in product(__A , repeat=3 ):
_snake_case = try_key(__A , __A )
if encoded is not None:
possibles.append(__A )
return possibles
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> list[str]:
return [possible for possible in possibles if common_word in possible.lower()]
def SCREAMING_SNAKE_CASE__ ( __A = "p059_cipher.txt" ) -> int:
_snake_case = 42
_snake_case = 42
_snake_case = 42
_snake_case = 42
_snake_case = Path(__A ).parent.joinpath(__A ).read_text(encoding='utf-8' )
_snake_case = [int(__A ) for number in data.strip().split(',' )]
_snake_case = filter_valid_chars(__A )
for common_word in COMMON_WORDS:
_snake_case = filter_common_word(__A , __A )
if len(__A ) == 1:
break
_snake_case = possibles[0]
return sum(ord(__A ) for char in decoded_text )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 42 | 1 |
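The solver above rests on XOR being its own inverse: ciphertext ^ key recovers the plaintext byte whenever the same repeating key is reused. A self-contained sketch of that property:
from itertools import cycle

plaintext = "an example"
key = "god"
cipher = [ord(p) ^ ord(k) for p, k in zip(plaintext, cycle(key))]
decoded = "".join(chr(c ^ ord(k)) for c, k in zip(cipher, cycle(key)))
assert decoded == plaintext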
"""simple docstring"""
from collections.abc import Sequence
def A_ ( _lowerCAmelCase : Sequence[int] | None = None ):
"""simple docstring"""
if nums is None or not nums:
raise ValueError('''Input sequence should not be empty''' )
_a = nums[0]
for i in range(1, len(_lowerCAmelCase ) ):
_a = nums[i]
_a = max(_lowerCAmelCase, ans + num, _lowerCAmelCase )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
__snake_case = int(input('''Enter number of elements : ''').strip())
__snake_case = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
| 153 |
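The function above is Kadane's algorithm: the best sum ending at index i is max(nums[i], best_ending_at_previous + nums[i]). A worked sketch on the classic example:
nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
best = cur = nums[0]
for n in nums[1:]:
    cur = max(n, cur + n)      # best subarray ending here
    best = max(best, cur)      # best seen anywhere
assert best == 6               # achieved by the subarray [4, -1, 2, 1]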
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def A_ ( _lowerCAmelCase : Dict, _lowerCAmelCase : List[str], _lowerCAmelCase : List[str] ):
"""simple docstring"""
_a = BertConfig.from_json_file(_lowerCAmelCase )
print(f'Building PyTorch model from configuration: {config}' )
_a = BertForPreTraining(_lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_bert(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict(), _lowerCAmelCase )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__snake_case = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 153 | 1 |
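An invocation sketch for the converter above; the script name and checkpoint paths below are placeholders, while the three flags are the ones its argparse block defines:
# python convert_tf_checkpoint.py \
#   --tf_checkpoint_path ./bert_model.ckpt \
#   --bert_config_file ./bert_config.json \
#   --pytorch_dump_path ./pytorch_model.bin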