from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
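For context, a minimal sketch of how this lazy init behaves from the caller's side; nothing heavier than the config module is imported until a model class is actually touched:

from transformers.models.funnel import FunnelConfig  # resolved lazily via _LazyModule.__getattr__

config = FunnelConfig()  # plain config object; no torch/tf import is triggered by the config alone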
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
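A minimal usage sketch for the formatter above via the public `datasets` API, assuming `jax` and `jaxlib` are installed (the dataset contents are illustrative):

from datasets import Dataset

ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]})
ds = ds.with_format("jax")    # rows now come back as jax.Array values
print(ds[0]["x"])             # a jax.Array on the default device
print(ds[:2]["x"].shape)      # equal-shaped rows are stacked by _consolidate: (2, 2)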
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
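A short sketch of instantiating the config and inspecting the derived attributes set at the end of `__init__`:

config = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24])
print(config.num_layers)   # 4, i.e. len(depths)
print(config.hidden_size)  # 768, i.e. embed_dim * 2 ** (num_layers - 1)
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']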
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_roberta_prelayernorm": [
        "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "RobertaPreLayerNormConfig",
        "RobertaPreLayerNormOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
        "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaPreLayerNormForCausalLM",
        "RobertaPreLayerNormForMaskedLM",
        "RobertaPreLayerNormForMultipleChoice",
        "RobertaPreLayerNormForQuestionAnswering",
        "RobertaPreLayerNormForSequenceClassification",
        "RobertaPreLayerNormForTokenClassification",
        "RobertaPreLayerNormModel",
        "RobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
        "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaPreLayerNormForCausalLM",
        "TFRobertaPreLayerNormForMaskedLM",
        "TFRobertaPreLayerNormForMultipleChoice",
        "TFRobertaPreLayerNormForQuestionAnswering",
        "TFRobertaPreLayerNormForSequenceClassification",
        "TFRobertaPreLayerNormForTokenClassification",
        "TFRobertaPreLayerNormMainLayer",
        "TFRobertaPreLayerNormModel",
        "TFRobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
        "FlaxRobertaPreLayerNormForCausalLM",
        "FlaxRobertaPreLayerNormForMaskedLM",
        "FlaxRobertaPreLayerNormForMultipleChoice",
        "FlaxRobertaPreLayerNormForQuestionAnswering",
        "FlaxRobertaPreLayerNormForSequenceClassification",
        "FlaxRobertaPreLayerNormForTokenClassification",
        "FlaxRobertaPreLayerNormModel",
        "FlaxRobertaPreLayerNormPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_roberta_prelayernorm import (
        ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
        RobertaPreLayerNormConfig,
        RobertaPreLayerNormOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta_prelayernorm import (
            ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaPreLayerNormForCausalLM,
            RobertaPreLayerNormForMaskedLM,
            RobertaPreLayerNormForMultipleChoice,
            RobertaPreLayerNormForQuestionAnswering,
            RobertaPreLayerNormForSequenceClassification,
            RobertaPreLayerNormForTokenClassification,
            RobertaPreLayerNormModel,
            RobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta_prelayernorm import (
            TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaPreLayerNormForCausalLM,
            TFRobertaPreLayerNormForMaskedLM,
            TFRobertaPreLayerNormForMultipleChoice,
            TFRobertaPreLayerNormForQuestionAnswering,
            TFRobertaPreLayerNormForSequenceClassification,
            TFRobertaPreLayerNormForTokenClassification,
            TFRobertaPreLayerNormMainLayer,
            TFRobertaPreLayerNormModel,
            TFRobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta_prelayernorm import (
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    """
    Exponential Moving Average of models weights
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config
    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average."""
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value
    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Copy current averaged parameters into the given collection of parameters."""
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        """Move internal buffers of the ExponentialMovingAverage to `device`."""
        # .to() on the tensors handles None correctly
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]
    def state_dict(self) -> dict:
        """Return the state of the ExponentialMovingAverage as a dict."""
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Save the current parameters for restoring later."""
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Restore the parameters stored with the `store` method."""
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        """Load the ExponentialMovingAverage state."""
        # deepcopy, to be consistent with module API
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
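For context, a short hedged sketch of the behavior the `gelu_10` test above exercises (a GELU whose output is clipped at 10):

import torch
from transformers.activations import get_activation

gelu = get_activation("gelu")
gelu10 = get_activation("gelu_10")

x = torch.tensor([1.0, 5.0, 50.0, 500.0])
print(gelu(x))    # grows without bound for large inputs
print(gelu10(x))  # identical for small inputs, capped at 10.0 for large ones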
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """DO NOT CHANGE. This function computes and logs the result metrics."""

    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """DO NOT CHANGE. This function normalizes the target text."""

    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
__lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
__lowerCamelCase : str = parser.parse_args()
main(args)
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "encoder.layer_norm_for_extract": "layer_norm_for_extract",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "label_embs_concat": "label_embeddings_concat",
    "mask_emb": "masked_spec_embed",
    "spk_proj": "speaker_proj",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "label_embeddings_concat",
    "speaker_proj",
    "layer_norm_for_extract",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
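A minimal invocation sketch for the converter above; it assumes fairseq is installed and a local UniSpeechSat checkpoint exists, and both paths are illustrative placeholders:

convert_unispeech_sat_checkpoint(
    checkpoint_path="unispeech_sat_base.pt",       # hypothetical local fairseq checkpoint
    pytorch_dump_folder_path="./unispeech-sat-hf",  # hypothetical output directory
    config_path=None,    # fall back to the default UniSpeechSatConfig
    dict_path=None,
    is_finetuned=False,  # pre-training head rather than CTC head
)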
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns empty collections of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
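A quick hedged illustration of the `process_story` contract the tests above exercise; CNN/DailyMail-style stories mark summary lines with `@highlight`:

raw = "First sentence.\nSecond sentence.\n@highlight\n\nThe summary line"
story_lines, summary_lines = process_story(raw)
print(story_lines)    # the story sentences
print(summary_lines)  # ["The summary line."] -- per the tests, a missing period is appended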
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class MaMaaaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def get_config(self):
        return MaMaaaConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            encoder_layerdrop=self.encoder_layerdrop,
            decoder_layerdrop=self.decoder_layerdrop,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = MaMaaaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MaMaaaForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M")

    def test_inference_no_head(self):
        model = MaMaaaModel.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)

        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")

        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )

        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 , dataset_name="my_dataset" )} ),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 )} ),
SplitDict({"train": SplitInfo()} ),
] , )
def SCREAMING_SNAKE_CASE ( snake_case_ : SplitDict ):
snake_case__ : List[Any] = split_dict._to_yaml_list()
assert len(snake_case_ ) == len(snake_case_ )
snake_case__ : str = SplitDict._from_yaml_list(snake_case_ )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
snake_case__ : Tuple = None
# the split name of split_dict takes over the name of the split info object
snake_case__ : Optional[Any] = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info" , [SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name="my_dataset" )] )
def SCREAMING_SNAKE_CASE ( split_info : Any ):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({"train": split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
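# A hedged usage sketch of the round trip exercised above: split metadata
# survives _to_yaml_list / _from_yaml_list, except for the deprecated
# dataset_name field. The helper name below is illustrative only.
def _example_split_dict_round_trip():
    splits = SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 )} )
    reloaded = SplitDict._from_yaml_list(splits._to_yaml_list() )
    assert reloaded["train"].num_examples == 42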
| 710 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order ( df , partition_order ):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(F'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
        for row_idx, row in enumerate(partition ):
            expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()) )
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(10 ).repartition(2 )
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df , partition_order )  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order )
    for i, (row_id, row_dict) in enumerate(generate_fn() ):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(10 ).repartition(1 )
    it = SparkExamplesIterable(df )
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it ):
assert row_id == F'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(30 ).repartition(3 )
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator" ) as generator_mock:
        generator_mock.shuffle = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [2, 1, 0] )
        shuffled_it = SparkExamplesIterable(df ).shuffle_data_sources(generator_mock )
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it ):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(20 ).repartition(4 )
    # Partitions 0 and 2
    shard_it_a = SparkExamplesIterable(df ).shard_data_sources(worker_id=0 , num_workers=2 )
    assert shard_it_a.n_shards == 2
    expected_row_ids_and_row_dicts_a = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [0, 2] )
    for i, (row_id, row_dict) in enumerate(shard_it_a ):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_a[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_b = SparkExamplesIterable(df ).shard_data_sources(worker_id=1 , num_workers=2 )
    assert shard_it_b.n_shards == 2
    expected_row_ids_and_row_dicts_b = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [1, 3] )
    for i, (row_id, row_dict) in enumerate(shard_it_b ):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_b[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
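# A hedged sketch of the shard assignment the sharding tests above rely on:
# worker i of n takes every n-th partition, so worker 0 of 2 sees [0, 2] and
# worker 1 sees [1, 3]. The helper name is illustrative only.
def _example_shard_assignment(num_partitions=4 , num_workers=2 ):
    return {worker_id: list(range(worker_id , num_partitions , num_workers ) ) for worker_id in range(num_workers )}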
| 25 | 0 |
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative ( img ):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h ):
        for j in range(pixel_v ):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
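# A hedged vectorized alternative: NumPy broadcasts the subtraction over the
# whole array at once, so the explicit double loop above collapses to one
# expression. The helper name is illustrative only.
def convert_to_negative_vectorized(img ):
    return 255 - img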
if __name__ == "__main__":
# read original image
    img = imread("""image_data/lena.jpg""", 1)
    # convert to its negative
    img = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
| 711 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase : List[str] = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
__lowerCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 25 | 0 |
def different_signs ( num_a : int , num_b : int ):
    return num_a ^ num_b < 0
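# Hedged sanity checks for the XOR sign trick above: the XOR of two ints is
# negative exactly when their sign bits differ. Helper name illustrative only.
def _example_different_signs():
    assert different_signs(1 , -1 ) is True
    assert different_signs(-4 , -7 ) is False
    assert different_signs(3 , 9 ) is False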
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 25 | 0 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester :
    """simple docstring"""
    def __init__( self : List[str] , parent , vocab_size=9_9 , batch_size=1_3 , encoder_seq_length=7 , decoder_seq_length=9 , is_training=True , use_attention_mask=True , use_labels=False , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , d_ff=3_7 , relative_attention_num_buckets=8 , dropout_rate=0.1 , initializer_factor=0.0_0_2 , eos_token_id=1 , pad_token_id=0 , decoder_start_token_id=0 , scope=None , decoder_layers=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config ( self : Union[str, Any] ):
        return TaConfig.from_pretrained("google/umt5-base" )
    def prepare_inputs_dict ( self : Any , config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id )
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id )
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=torch_device )
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=torch_device )
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers , config.num_attention_heads , device=torch_device )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
    def prepare_config_and_inputs ( self : Tuple ):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1 )
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1 )
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config , input_ids , decoder_input_ids )
        return config, input_dict
    def prepare_config_and_inputs_for_common ( self : Optional[int] ):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config ( self : str ):
return TaConfig(
vocab_size=1_6_6 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def get_config ( self : Tuple ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def create_and_check_model ( self : List[str] , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ):
        model = UMTaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids=input_ids , decoder_input_ids=decoder_input_ids , attention_mask=attention_mask , decoder_attention_mask=decoder_attention_mask , )
        result = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids )
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
        self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past ) , config.num_layers )
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0] ) , 4 )
    def create_and_check_decoder_model_past ( self : List[str] , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ):
        model = UMTaModel(config=config ).get_decoder().to(torch_device ).eval()
        # first forward pass
        outputs = model(input_ids , use_cache=True )
        outputs_use_cache_conf = model(input_ids )
        outputs_no_past = model(input_ids , use_cache=False )
        self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
        self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        output_from_no_past = model(next_input_ids )["last_hidden_state"]
        output_from_past = model(next_tokens , past_key_values=past_key_values )["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3 ) )
    def create_and_check_model_fpaa_forward ( self : int , config , input_dict , ):
        model = UMTaModel(config=config ).to(torch_device ).half().eval()
        output = model(**input_dict )["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output ).any().item() )
@require_torch
class UMTaModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMTaForConditionalGeneration,
            "feature-extraction": UMTaModel,
            "summarization": UMTaForConditionalGeneration,
            "text2text-generation": UMTaForConditionalGeneration,
            "translation": UMTaForConditionalGeneration,
            "question-answering": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp ( self : int ):
        self.model_tester = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def _lowercase ( self : Union[str, Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0] ).to(torch_device )
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=True , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def _lowercase ( self : Optional[int] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*config_and_inputs )
    def _lowercase ( self : Tuple ):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config ).eval()
        model.to(torch_device )
        head_masking = {
            "head_mask": torch.zeros(config.num_layers , config.num_heads , device=torch_device ),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
        }
        for attn_name, (name, mask) in zip(attention_names , head_masking.items() ):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers , config.num_heads , device=torch_device )
            out = model.generate(
                config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=True , return_dict_in_generate=True , **head_masks , )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def _lowercase ( self : List[str] ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def _lowercase ( self : Dict ):
        model = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=False , legacy=False )
        input_text = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
        input_ids = tokenizer(input_text , return_tensors="pt" , padding=True ).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
[ 3_8_5_3_0, 2_1_0_7_0_3, 2_5_6_2_9_9, 1_4_1_0, 2_5_6_2_9_8, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_2_6, 3_2_1, 6_7_1, 2_5_9_2_2, 2_5_6_2_9_9, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1_4_6_0, 3_3_9, 3_1_2, 1_9_0_1_4, 1_0_6_2_0, 7_5_8, 2_5_6_2_9_9, 2_3_5_5,2_7_4, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_1_7, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 3_0_1, 2_5_6_2_9_8, 2_7_5, 1_1_9_9_8_3,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_2_0, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 2_2_3_4, 2_8_9, 2_2_7_5, 3_3_3,6_1_3_9_1, 2_8_9, 2_5_6_2_9_8, 5_4_3, 2_5_6_2_9_7, 1_6_8_7_1_4, 3_2_9, 2_5_6_2_9_6,2_7_4, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids , EXPECTED_IDS )
        generated_ids = model.generate(input_ids.to(torch_device ) )
        EXPECTED_FILLING = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
        filling = tokenizer.batch_decode(generated_ids )
        self.assertEqual(filling , EXPECTED_FILLING )
| 713 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling ( data : dict ):
    return (data["data"], data["target"])
def xgboost ( features : np.ndarray , target : np.ndarray ):
    classifier = XGBClassifier()
    classifier.fit(features , target )
    return classifier
def main ( ):
    data = load_iris()
    features, targets = data_handling(data )
    x_train, x_test, y_train, y_test = train_test_split(
        features , targets , test_size=0.25 )
    names = data["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train , y_train )
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier , x_test , y_test , display_labels=names , cmap="Blues" , normalize="true" , )
    plt.title("Normalized Confusion Matrix - IRIS Dataset" )
plt.show()
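# A hedged sanity sketch: data_handling simply unpacks the sklearn Bunch into a
# (features, targets) pair; the shapes below are the well-known iris dimensions.
# The helper name is illustrative only.
def _example_data_handling():
    features, targets = data_handling(load_iris() )
    assert features.shape == (150, 4) and targets.shape == (150,)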
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 25 | 0 |
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)
class ParallelBackendConfig :
    """simple docstring"""
    backend_name = None
@experimental
def parallel_map ( function , iterable , num_proc , types , disable_tqdm , desc , single_map_nested_func ):
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function , iterable , num_proc , types , disable_tqdm , desc , single_map_nested_func )
    return _map_with_joblib(function , iterable , num_proc , types , disable_tqdm , desc , single_map_nested_func )
def _map_with_multiprocessing_pool ( function , iterable , num_proc , types , disable_tqdm , desc , single_map_nested_func ):
    num_proc = num_proc if num_proc <= len(iterable ) else len(iterable )
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    for index in range(num_proc ):
        div = len(iterable ) // num_proc
        mod = len(iterable ) % num_proc
        start = div * index + min(index , mod )
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
    if len(iterable ) != sum(len(i[1] ) for i in split_kwds ):
        raise ValueError(
            F'''Error dividing inputs iterable among processes. '''
            F'''Total number of objects {len(iterable )}, '''
            F'''length: {sum(len(i[1] ) for i in split_kwds )}''' )
    logger.info(
        F'''Spawning {num_proc} processes for {len(iterable )} objects in slices of {[len(i[1] ) for i in split_kwds]}''' )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc , initargs=initargs , initializer=initializer ) as pool:
        mapped = pool.map(single_map_nested_func , split_kwds )
    logger.info(F'''Finished {num_proc} processes''' )
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(F'''Unpacked {len(mapped )} objects''' )
    return mapped
def _map_with_joblib ( function , iterable , num_proc , types , disable_tqdm , desc , single_map_nested_func ):
    # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
    # and it requires monkey-patching joblib internal classes which is subject to change
    import joblib
    with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=num_proc ):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def parallel_backend ( backend_name : str ):
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()
    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
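# A hedged usage sketch: while the context manager above is active,
# ParallelBackendConfig carries the chosen backend, so parallel_map dispatches
# to joblib instead of multiprocessing.Pool. Entering with "spark" requires the
# joblibspark package; the helper name is illustrative only.
def _example_backend_switch():
    assert ParallelBackendConfig.backend_name is None
    with parallel_backend("spark" ):
        assert ParallelBackendConfig.backend_name == "spark"
    assert ParallelBackendConfig.backend_name is None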
| 714 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results ( result : Dataset , args : Dict[str, str] ):
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/" ) + [args.config, args.split] )
    # load metric
    wer = load_metric("wer" )
    cer = load_metric("cer" )
    # compute metrics
    wer_result = wer.compute(references=result["target"] , predictions=result["prediction"] )
    cer_result = cer.compute(references=result["target"] , predictions=result["prediction"] )
    # print & log results
    result_str = F'''WER: {wer_result}\nCER: {cer_result}'''
    print(result_str )
    with open(F'''{dataset_id}_eval_results.txt''' , "w" ) as f:
        f.write(result_str )
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = F'''log_{dataset_id}_predictions.txt'''
        target_file = F'''log_{dataset_id}_targets.txt'''
        with open(pred_file , "w" ) as p, open(target_file , "w" ) as t:
            # mapping function to write output
            def write_to_file(batch , i ):
                p.write(F'''{i}''' + "\n" )
                p.write(batch["prediction"] + "\n" )
                t.write(F'''{i}''' + "\n" )
                t.write(batch["target"] + "\n" )
            result.map(write_to_file , with_indices=True )
def normalize_text ( text : str ):
    chars_to_ignore_regex = "[,?.!\-\;\:\"“%‘”�—’…–]"  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex , "" , text.lower() )
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t ) )
    return text
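# A hedged behavior sketch of the normalizer above: ignored punctuation is
# dropped, text is lower-cased, and newline runs collapse to single spaces.
# The helper name is illustrative only.
def _example_normalize_text():
    assert normalize_text("Hello,\nWorld!" ) == "hello world"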
def main ( args ):
    # load dataset
    dataset = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=True )
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id )
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column("audio" , Audio(sampling_rate=sampling_rate ) )
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device )
    # map function to decode audio
    def map_to_pred(batch ):
        prediction = asr(
            batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"] )
        return batch
    # run inference on all examples
    result = dataset.map(map_to_pred , remove_columns=dataset.column_names )
    # compute and log_results
    # do not change function below
    log_results(result , args )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args)
| 25 | 0 |
def binary_recursive ( decimal : int ):
    decimal = int(decimal )
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal )
    div, mod = divmod(decimal , 2 )
    return binary_recursive(div ) + str(mod )
def main ( number : str ):
    number = str(number ).strip()
    if not number:
        raise ValueError("No input value was provided" )
    negative = "-" if number.startswith("-" ) else ""
    number = number.lstrip("-" )
    if not number.isnumeric():
        raise ValueError("Input value is not an integer" )
    return F'''{negative}0b{binary_recursive(int(number ) )}'''
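# Hedged sanity checks: the wrapper agrees with Python's built-in bin() for
# both signs. The helper name is illustrative only.
def _example_binary_conversion():
    assert main("200" ) == bin(200 )
    assert main("-200" ) == bin(-200 )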
if __name__ == "__main__":
from doctest import testmod
testmod()
| 715 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class TextClassification ( TaskTemplate ):
    """simple docstring"""
    task: str = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"text": Value("string" )} )
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel} )
    text_column: str = "text"
    label_column: str = "labels"
    def align_with_features ( self : Tuple , features : List[Any] ):
        if self.label_column not in features:
            raise ValueError(f'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
    @property
    def column_mapping ( self : Tuple ):
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
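# A hedged usage sketch: aligning the template against concrete dataset
# features swaps the generic ClassLabel placeholder for the dataset's own
# label feature. The helper name is illustrative only.
def _example_align_with_features():
    features = Features({"text": Value("string" ), "labels": ClassLabel(names=["neg", "pos"] )} )
    template = TextClassification()
    aligned = template.align_with_features(features )
    assert aligned.label_schema["labels"].names == ["neg", "pos"]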
| 25 | 0 |
__lowerCamelCase : Tuple = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def main ( ):
    message = input("Enter message: " )
    key = input("Enter key [alphanumeric]: " )
    mode = input("Encrypt/Decrypt [e/d]: " )
    if mode.lower().startswith("e" ):
        mode = "encrypt"
        translated = encrypt_message(key , message )
    elif mode.lower().startswith("d" ):
        mode = "decrypt"
        translated = decrypt_message(key , message )
    print(F'''\n{mode.title()}ed message:''' )
    print(translated )
def encrypt_message ( key : str , message : str ):
    return translate_message(key , message , "encrypt" )
def decrypt_message ( key : str , message : str ):
    return translate_message(key , message , "decrypt" )
def translate_message ( key : str , message : str , mode : str ):
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper() )
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index] )
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index] )
            num %= len(LETTERS )
            if symbol.isupper():
                translated.append(LETTERS[num] )
            elif symbol.islower():
                translated.append(LETTERS[num].lower() )
            key_index += 1
            if key_index == len(key ):
                key_index = 0
        else:
            translated.append(symbol )
    return "".join(translated )
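# A hedged round-trip sketch: encrypting and then decrypting with the same key
# restores the message; non-letters pass through untouched. The helper name is
# illustrative only.
def _example_vigenere_round_trip():
    ciphertext = encrypt_message("LION" , "Attack at dawn!" )
    assert decrypt_message("LION" , ciphertext ) == "Attack at dawn!"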
if __name__ == "__main__":
main()
| 716 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
__lowerCamelCase : Dict = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class InstructBlipVisionConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = "instructblip_vision_model"
    def __init__( self : List[Any] , hidden_size=1_4_0_8 , intermediate_size=6_1_4_4 , num_hidden_layers=3_9 , num_attention_heads=1_6 , image_size=2_2_4 , patch_size=1_4 , hidden_act="gelu" , layer_norm_eps=1e-6 , attention_dropout=0.0 , initializer_range=1e-1_0 , qkv_bias=True , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
@classmethod
    def _lowercase ( cls : List[str] , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type" ) == "instructblip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class InstructBlipQFormerConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = "instructblip_qformer"
    def __init__( self : Any , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , initializer_range=0.0_2 , layer_norm_eps=1e-1_2 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=1_4_0_8 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
@classmethod
    def _lowercase ( cls : List[Any] , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type" ) == "instructblip":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class InstructBlipConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = "instructblip"
    is_composition = True
    def __init__( self : List[str] , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=3_2 , **kwargs ):
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
        self.vision_config = InstructBlipVisionConfig(**vision_config )
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config )
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.0_2
@classmethod
    def _lowercase ( cls : List[str] , vision_config : InstructBlipVisionConfig , qformer_config : InstructBlipQFormerConfig , text_config : PretrainedConfig , **kwargs , ):
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )
    def _lowercase ( self : Optional[int] ):
        output = copy.deepcopy(self.__dict__ )
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
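# A hedged construction sketch: default sub-configs are enough to smoke-test
# the composite config, and __init__ ties the Q-Former's encoder width to the
# vision hidden size. The helper name is illustrative only.
def _example_compose_instructblip_config():
    config = InstructBlipConfig()  # all sub-configs fall back to their defaults
    assert config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size
    return config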
| 25 | 0 |
import math
def res ( x : float , y : float ):
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y*log10(x), where 10 is the base.
        return y * math.log10(x )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("This should never happen" )
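# Hedged checks: res compares magnitudes through y*log10(x); zero arguments
# are handled by the special cases above. The helper name is illustrative only.
def _example_res():
    assert res(0 , 5 ) == 0
    assert res(5 , 0 ) == 1
    assert abs(res(2 , 10 ) - 10 * math.log10(2 ) ) < 1e-9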
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
    prompt = """Enter the base and the power separated by a comma: """
    xa, ya = map(int, input(prompt).split(""","""))
    xb, yb = map(int, input(prompt).split(""","""))
# We find the log of each number, using the function res(), which takes two
# arguments.
    resa = res(xa, ya)
    resb = res(xb, yb)
# We check for the largest number
    if resa > resb:
        print("""Largest number is""", xa, """^""", ya)
    elif resb > resa:
        print("""Largest number is""", xb, """^""", yb)
else:
print("""Both are equal""")
| 717 |
def gnome_sort ( lst : list ):
    if len(lst ) <= 1:
        return lst
    i = 1
    while i < len(lst ):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
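# Hedged sanity checks for the in-place gnome sort above; the helper name is
# illustrative only.
def _example_gnome_sort():
    assert gnome_sort([3, 1, 2] ) == [1, 2, 3]
    assert gnome_sort([] ) == []
    assert gnome_sort([5] ) == [5]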
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(gnome_sort(unsorted))
| 25 | 0 |
from collections.abc import Iterable
from typing import Any
class Node :
    """simple docstring"""
    def __init__( self : Optional[Any] , value : int | None = None ):
        self.value = value
        self.parent : Node | None = None  # Added in order to delete a node easier
        self.left : Node | None = None
        self.right : Node | None = None
    def __repr__( self : List[str] ):
        from pprint import pformat
        if self.left is None and self.right is None:
            return str(self.value )
        return pformat({f'''{self.value}''': (self.left, self.right)} , indent=1 )
class BinarySearchTree :
    """simple docstring"""
    def __init__( self : Tuple , root : Node | None = None ):
        self.root = root
    def __str__( self : Optional[int] ):
        return str(self.root )
    def __reassign_nodes ( self : Dict , node : Node , new_children : Node | None ):
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node ):  # If it is the right children
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children
    def is_right ( self : Tuple , node : Node ):
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False
    def empty ( self : List[Any] ):
        return self.root is None
    def __insert ( self : Dict , value ):
        new_node = Node(value )  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node
    def insert ( self : Optional[Any] , *values ):
        for value in values:
            self.__insert(value )
    def search ( self : List[str] , value ):
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another." )
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node
    def get_max ( self : Dict , node : Node | None = None ):
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node
    def get_min ( self : Union[str, Any] , node : Node | None = None ):
        if node is None:
            node = self.root
        if self.root is None:
            return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node
    def remove ( self : Tuple , value : int ):
        node = self.search(value )  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node , None )
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node , node.right )
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node , node.left )
            else:
                tmp_node = self.get_max(
                    node.left )  # Gets the max value of the left branch
                self.remove(tmp_node.value )  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure
    def preorder_traverse ( self : Dict , node : Node | None ):
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left )
            yield from self.preorder_traverse(node.right )
    def traversal_tree ( self : Optional[int] , traversal_function=None ):
        if traversal_function is None:
            return self.preorder_traverse(self.root )
        else:
            return traversal_function(self.root )
    def inorder ( self : Optional[int] , arr : list , node : Node | None ):
        if node:
            self.inorder(arr , node.left )
            arr.append(node.value )
            self.inorder(arr , node.right )
    def find_kth_smallest ( self : str , k : int , node : Node ):
        arr : list[int] = []
        self.inorder(arr , node )  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder ( curr_node : Node | None ):
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
    return node_list
def main ( ):
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i )
    # Prints all the elements of the list in order traversal
    print(t )
    if t.search(6 ) is not None:
        print("The value 6 exists" )
    else:
        print("The value 6 doesn't exist" )
    if t.search(-1 ) is not None:
        print("The value -1 exists" )
    else:
        print("The value -1 doesn't exist" )
    if not t.empty():
        print("Max Value: " , t.get_max().value )  # type: ignore
        print("Min Value: " , t.get_min().value )  # type: ignore
    for i in testlist:
        t.remove(i )
    print(t )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
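# A hedged invariant sketch: an inorder traversal of a BST yields its values in
# ascending order, which is exactly what find_kth_smallest relies on above.
# The helper name is illustrative only.
def _example_kth_smallest():
    t = BinarySearchTree()
    t.insert(8, 3, 6, 1, 10, 14, 13, 4, 7 )
    assert t.find_kth_smallest(1 , t.root ) == 1
    assert t.find_kth_smallest(5 , t.root ) == 7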
| 718 |
from __future__ import annotations
import time
Path = list[tuple[int, int]]
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node :
    """simple docstring"""
    def __init__( self : Union[str, Any] , pos_x : int , pos_y : int , goal_x : int , goal_y : int , parent : Node | None ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch :
    """simple docstring"""
    def __init__( self : List[Any] , start : tuple[int, int] , goal : tuple[int, int] ):
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , None )
        self.node_queue = [self.start]
        self.reached = False
    def search ( self : Dict ):
        while self.node_queue:
            current_node = self.node_queue.pop(0 )
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node )
            successors = self.get_successors(current_node )
            for node in successors:
                self.node_queue.append(node )
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors ( self : Union[str, Any] , parent : Node ):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent ) )
        return successors
    def retrace_path ( self : Optional[Any] , node : Node | None ):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
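# A hedged standalone sketch of the expansion rule get_successors implements:
# moves follow `delta` (up, left, down, right), gated by bounds and obstacle
# checks. The helper name is illustrative only.
def _example_neighbors(pos_y: int , pos_x: int ):
    neighbors = []
    for dy, dx in delta:
        ny, nx = pos_y + dy, pos_x + dx
        if 0 <= ny < len(grid ) and 0 <= nx < len(grid[0] ) and grid[ny][nx] == 0:
            neighbors.append((ny, nx) )
    return neighbors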
class BidirectionalBreadthFirstSearch :
    """simple docstring"""
    def __init__( self : Dict , start , goal ):
        self.fwd_bfs = BreadthFirstSearch(start , goal )
        self.bwd_bfs = BreadthFirstSearch(goal , start )
        self.reached = False
    def search ( self : Optional[Any] ):
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0 )
            current_bwd_node = self.bwd_bfs.node_queue.pop(0 )
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node ),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node ),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node )
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None
    def retrace_bidirectional_path ( self : Any , fwd_node : Node , bwd_node : Node ):
        fwd_path = self.fwd_bfs.retrace_path(fwd_node )
        bwd_path = self.bwd_bfs.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("""Unidirectional BFS computation time : """, bfs_time)
    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 25 | 0 |
def jaccard_similarity ( set_a , set_b , alternative_union=False ):
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union
    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )
    return None
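# Hedged sanity checks covering both the set and the sequence code paths; the
# helper name is illustrative only.
def _example_jaccard():
    assert jaccard_similarity({1, 2, 3} , {2, 3, 4} ) == 0.5
    assert jaccard_similarity([1, 2, 3] , [2, 3, 4] ) == 0.5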
if __name__ == "__main__":
    set_a = {"""a""", """b""", """c""", """d""", """e"""}
    set_b = {"""c""", """d""", """e""", """f""", """h""", """i"""}
print(jaccard_similarity(set_a, set_b))
| 719 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self : List[Any] , parent , batch_size=7 , num_channels=3 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 2_5_5 , do_pad=True , ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
def _lowercase ( self : Dict ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowercase ( self : Optional[int] , __A : Dict , __A : List[Any]=False ):
if not batched:
snake_case__ : List[str] = image_inputs[0]
if isinstance(__A , Image.Image ):
snake_case__, snake_case__ : Tuple = image.size
else:
snake_case__, snake_case__ : List[str] = image.shape[1], image.shape[2]
if w < h:
snake_case__ : Dict = int(self.size["shortest_edge"] * h / w )
snake_case__ : Optional[int] = self.size["shortest_edge"]
elif w > h:
snake_case__ : List[Any] = self.size["shortest_edge"]
snake_case__ : Union[str, Any] = int(self.size["shortest_edge"] * w / h )
else:
snake_case__ : Dict = self.size["shortest_edge"]
snake_case__ : Dict = self.size["shortest_edge"]
else:
snake_case__ : str = []
for image in image_inputs:
snake_case__, snake_case__ : str = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case__ : Dict = max(__A , key=lambda item : item[0] )[0]
snake_case__ : Tuple = max(__A , key=lambda item : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = ConditionalDetrImageProcessor if is_vision_available() else None
def _lowercase ( self : int ):
snake_case__ : Tuple = ConditionalDetrImageProcessingTester(self )
@property
def _lowercase ( self : Any ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : Any ):
snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , "image_mean" ) )
self.assertTrue(hasattr(__A , "image_std" ) )
self.assertTrue(hasattr(__A , "do_normalize" ) )
self.assertTrue(hasattr(__A , "do_resize" ) )
self.assertTrue(hasattr(__A , "size" ) )
def _lowercase ( self : List[str] ):
snake_case__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __A )
snake_case__ : Any = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__A )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
self.assertEqual(image_processor.do_pad , __A )
def _lowercase ( self : Union[str, Any] ):
pass
def _lowercase ( self : List[str] ):
# Initialize image_processing
snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
snake_case__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__, snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A )
snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Tuple ):
# Initialize image_processing
snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
snake_case__ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Dict = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Optional[Any] = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : str = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Tuple ):
# Initialize image_processing
snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Dict = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowercase ( self : List[Any] ):
# prepare image and target
snake_case__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case__ : Union[str, Any] = json.loads(f.read() )
snake_case__ : Optional[Any] = {"image_id": 3_9_7_6_9, "annotations": target}
# encode them
snake_case__ : Tuple = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
snake_case__ : int = image_processing(images=__A , annotations=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : str = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Tuple = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : Optional[int] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : str = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : Optional[int] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify orig_size
snake_case__ : Dict = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : List[str] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
@slow
def _lowercase ( self : str ):
# prepare image, target and masks_path
snake_case__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case__ : int = json.loads(f.read() )
snake_case__ : Optional[int] = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
snake_case__ : Optional[Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case__ : Optional[int] = ConditionalDetrImageProcessor(format="coco_panoptic" )
snake_case__ : Tuple = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Dict = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : int = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : str = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : Optional[Any] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify masks
snake_case__ : str = 8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A )
# verify orig_size
snake_case__ : int = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : List[Any] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
| 25 | 0 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__lowerCamelCase : Optional[int] = get_logger()
__lowerCamelCase : Optional[dict] = None
class SCREAMING_SNAKE_CASE__ ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
"""simple docstring"""
def __init__( self : Optional[Any] , __A : Dict=None , __A : List[str]=None , **__A : str ):
super().__init__(features=__A )
import jax
from jaxlib.xla_client import Device
if isinstance(__A , __A ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(__A )}, as `jaxlib.xla_extension.Device` '''
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
snake_case__ : List[Any] = device if isinstance(__A , __A ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
snake_case__ : Any = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
snake_case__ : str = str(jax.devices()[0] )
snake_case__ : str = jnp_array_kwargs
@staticmethod
def _lowercase ( ):
import jax
return {str(__A ): device for device in jax.devices()}
def _lowercase ( self : Optional[Any] , __A : str ):
import jax
import jax.numpy as jnp
if isinstance(__A , __A ) and column:
if all(
isinstance(__A , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__A , axis=0 )
return column
def _lowercase ( self : int , __A : Tuple ):
import jax
import jax.numpy as jnp
if isinstance(__A , (str, bytes, type(__A )) ):
return value
elif isinstance(__A , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
snake_case__ : Optional[int] = {}
if isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_x64:
snake_case__ : Any = {"dtype": jnp.int64}
else:
snake_case__ : Tuple = {"dtype": jnp.int32}
elif isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
snake_case__ : str = {"dtype": jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__A , PIL.Image.Image ):
snake_case__ : Optional[Any] = np.asarray(__A )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
snake_case__ : int = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__A , **{**default_dtype, **self.jnp_array_kwargs} )
def _lowercase ( self : Union[str, Any] , __A : Optional[int] ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__A , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__A , "__array__" ) and not isinstance(__A , jax.Array ):
snake_case__ : Union[str, Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__A , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
elif isinstance(__A , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
return self._tensorize(__A )
def _lowercase ( self : Tuple , __A : dict ):
return map_nested(self._recursive_tensorize , __A , map_list=__A )
def _lowercase ( self : Optional[int] , __A : pa.Table ):
snake_case__ : int = self.numpy_arrow_extractor().extract_row(__A )
snake_case__ : Tuple = self.python_features_decoder.decode_row(__A )
return self.recursive_tensorize(__A )
def _lowercase ( self : Optional[Any] , __A : pa.Table ):
snake_case__ : Any = self.numpy_arrow_extractor().extract_column(__A )
snake_case__ : Optional[int] = self.python_features_decoder.decode_column(__A , pa_table.column_names[0] )
snake_case__ : List[Any] = self.recursive_tensorize(__A )
snake_case__ : Dict = self._consolidate(__A )
return column
def _lowercase ( self : str , __A : pa.Table ):
snake_case__ : Any = self.numpy_arrow_extractor().extract_batch(__A )
snake_case__ : int = self.python_features_decoder.decode_batch(__A )
snake_case__ : List[Any] = self.recursive_tensorize(__A )
for column_name in batch:
snake_case__ : Any = self._consolidate(batch[column_name] )
return batch
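# Illustrative sketch (separate from the formatter above) of the x64 behaviour
# the dtype branches rely on: `jax_enable_x64` must be flipped before any
# arrays are created, and it switches the default integer dtype to 64-bit.
import jax
jax.config.update("jax_enable_x64", True)
import jax.numpy as jnp
assert jnp.array([1, 2, 3]).dtype == jnp.int64 # would be int32 with the flag off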
| 720 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__lowerCamelCase : Optional[int] = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
__lowerCamelCase : str = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
__lowerCamelCase : str = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
"""simple docstring"""
def _lowercase ( self : Dict ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
] , )
def _lowercase ( self : Union[str, Any] , __A : Dict , __A : List[str] , __A : int=None , __A : List[Any]=None , __A : Optional[int]=None , __A : List[Any]=None , __A : Union[str, Any]="auto" , __A : Optional[Any]=-1 , __A : Optional[Any]=0.9 , __A : Any=5 , __A : List[Any]=5_0_0 , __A : Tuple="gpt2-large" , __A : Optional[Any]=-1 , __A : str=1_0_2_4 , __A : Tuple=2_5 , __A : str=5 , __A : Optional[int]=True , __A : Any=2_5 , ):
snake_case__ : List[Any] = compute_mauve(
p_text=__A , q_text=__A , p_features=__A , q_features=__A , p_tokens=__A , q_tokens=__A , num_buckets=__A , pca_max_data=__A , kmeans_explained_var=__A , kmeans_num_redo=__A , kmeans_max_iter=__A , featurize_model_name=__A , device_id=__A , max_text_length=__A , divergence_curve_discretization_size=__A , mauve_scaling_factor=__A , verbose=__A , seed=__A , )
return out
| 25 | 0 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
__lowerCamelCase : Any = logging.getLogger(__name__)
__lowerCamelCase : Union[str, Any] = """Hello world! cécé herlolip"""
__lowerCamelCase : str = namedtuple(
"""BertAbsConfig""",
[
"""temp_dir""",
"""large""",
"""use_bert_emb""",
"""finetune_bert""",
"""encoder""",
"""share_emb""",
"""max_pos""",
"""enc_layers""",
"""enc_hidden_size""",
"""enc_heads""",
"""enc_ff_size""",
"""enc_dropout""",
"""dec_layers""",
"""dec_hidden_size""",
"""dec_heads""",
"""dec_ff_size""",
"""dec_dropout""",
],
)
def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str ):
snake_case__ : Tuple = BertAbsConfig(
temp_dir="." , finetune_bert=snake_case_ , large=snake_case_ , share_emb=snake_case_ , use_bert_emb=snake_case_ , encoder="bert" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
snake_case__ : Dict = torch.load(snake_case_ , lambda storage , loc : storage )
snake_case__ : Union[str, Any] = AbsSummarizer(snake_case_ , torch.device("cpu" ) , snake_case_ )
original.eval()
snake_case__ : List[str] = BertAbsSummarizer(snake_case_ , torch.device("cpu" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical" )
snake_case__ : int = BertTokenizer.from_pretrained("bert-base-uncased" )
# prepare the model inputs
snake_case__ : List[str] = tokenizer.encode("This is sample éàalj'-." )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
snake_case__ : Union[str, Any] = torch.tensor(snake_case_ ).unsqueeze(0 )
snake_case__ : Dict = tokenizer.encode("This is sample 3 éàalj'-." )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
snake_case__ : List[Any] = torch.tensor(snake_case_ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
snake_case__ : str = encoder_input_ids
snake_case__ : Any = decoder_input_ids
snake_case__ : Union[str, Any] = None
snake_case__ : str = None
snake_case__ : Optional[Any] = None
snake_case__ : Optional[Any] = None
snake_case__ : Any = None
# The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
snake_case__ : Optional[int] = original(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
snake_case__ : Tuple = original.generator(snake_case_ )
snake_case__ : Optional[Any] = new_model(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
snake_case__ : int = new_model.generator(snake_case_ )
snake_case__ : List[Any] = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(snake_case_ ) )
snake_case__ : str = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(snake_case_ ) )
snake_case__ : Tuple = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 )
if are_identical:
logging.info("all weights are equal up to 1e-3" )
else:
raise ValueError("the weights are different. The new model is likely different from the original one." )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary" )
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
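# A small illustrative helper (an assumption, not part of the original script)
# generalizing the output check above to whole state dicts:
def state_dicts_match(sd_a, sd_b, atol=1e-3):
    # True when both state dicts share the same keys and every tensor pair
    # agrees within the given absolute tolerance.
    return sd_a.keys() == sd_b.keys() and all(
        torch.allclose(sd_a[key], sd_b[key], atol=atol) for key in sd_a
    )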
if __name__ == "__main__":
__lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--bertabs_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
__lowerCamelCase : List[str] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 721 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__lowerCamelCase : Union[str, Any] = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__lowerCamelCase : List[Any] = concatenate_datasets
__lowerCamelCase : List[str] = DownloadConfig
__lowerCamelCase : Union[str, Any] = DownloadManager
__lowerCamelCase : str = DownloadMode
__lowerCamelCase : Union[str, Any] = DownloadConfig
__lowerCamelCase : List[str] = DownloadMode
__lowerCamelCase : Dict = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 25 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
__lowerCamelCase : Optional[int] = {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "roformer"
def __init__( self : Optional[int] , __A : Tuple=5_0_0_0_0 , __A : Optional[Any]=None , __A : Any=7_6_8 , __A : Optional[Any]=1_2 , __A : Tuple=1_2 , __A : Dict=3_0_7_2 , __A : Dict="gelu" , __A : Any=0.1 , __A : Union[str, Any]=0.1 , __A : str=1_5_3_6 , __A : Optional[Any]=2 , __A : List[Any]=0.0_2 , __A : List[Any]=1e-1_2 , __A : Tuple=0 , __A : Optional[Any]=False , __A : str=True , **__A : Any , ):
super().__init__(pad_token_id=__A , **__A )
snake_case__ : Dict = vocab_size
snake_case__ : int = hidden_size if embedding_size is None else embedding_size
snake_case__ : str = hidden_size
snake_case__ : List[Any] = num_hidden_layers
snake_case__ : Union[str, Any] = num_attention_heads
snake_case__ : List[Any] = hidden_act
snake_case__ : Union[str, Any] = intermediate_size
snake_case__ : Any = hidden_dropout_prob
snake_case__ : Any = attention_probs_dropout_prob
snake_case__ : List[str] = max_position_embeddings
snake_case__ : int = type_vocab_size
snake_case__ : Any = initializer_range
snake_case__ : int = layer_norm_eps
snake_case__ : List[str] = rotary_value
snake_case__ : List[str] = use_cache
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
@property
def _lowercase ( self : List[Any] ):
if self.task == "multiple-choice":
snake_case__ : List[Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
snake_case__ : Optional[int] = {0: "batch", 1: "sequence"}
snake_case__ : Optional[int] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
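# Illustrative usage (assuming the original transformers class names):
# from transformers import RoFormerConfig
# config = RoFormerConfig() # vocab_size=50000, max_position_embeddings=1536
# onnx_config = RoFormerOnnxConfig(config)
# list(onnx_config.inputs) # ["input_ids", "attention_mask", "token_type_ids"]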
| 700 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
snake_case__ : str = [True] * limit
snake_case__ : str = False
snake_case__ : str = False
snake_case__ : str = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
snake_case__ : Optional[Any] = i * 2
while index < limit:
snake_case__ : Union[str, Any] = False
snake_case__ : Any = index + i
snake_case__ : Optional[Any] = [2]
for i in range(3 , snake_case_ , 2 ):
if is_prime[i]:
primes.append(snake_case_ )
return primes
def SCREAMING_SNAKE_CASE ( snake_case_ : int = 1000000 ):
snake_case__ : Optional[int] = prime_sieve(snake_case_ )
snake_case__ : List[Any] = 0
snake_case__ : List[str] = 0
for i in range(len(snake_case_ ) ):
for j in range(i + length , len(snake_case_ ) ):
snake_case__ : Dict = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
snake_case__ : Tuple = j - i
snake_case__ : str = sol
return largest
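# A minimal, readably-named sketch (mine, not the code above) of the same sieve
# of Eratosthenes idea, with a quick sanity check:
def tiny_sieve(limit: int) -> list:
    is_prime = [True] * limit
    is_prime[0] = is_prime[1] = False
    for i in range(2, int(limit**0.5) + 1):
        if is_prime[i]:
            for multiple in range(i * i, limit, i):
                is_prime[multiple] = False
    return [n for n in range(limit) if is_prime[n]]

assert tiny_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]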
if __name__ == "__main__":
print(f"{solution() = }")
| 25 | 0 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
__lowerCamelCase : Union[str, Any] = HfArgumentParser(InitializationArguments)
__lowerCamelCase : Optional[Any] = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
__lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
__lowerCamelCase : str = {
"""vocab_size""": len(tokenizer),
"""scale_attn_by_inverse_layer_idx""": True,
"""reorder_and_upcast_attn""": True,
}
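# (scale_attn_by_inverse_layer_idx rescales attention weights by 1/(layer_idx + 1);
# reorder_and_upcast_attn upcasts the attention dot-product and softmax to
# float32. Both are GPT2Config flags aimed at training stability.)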
# Load model config (GPT-2 large in this case)
__lowerCamelCase : Dict = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
__lowerCamelCase : str = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 701 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : int , __A : List[str] , __A : Union[str, Any]=7 , __A : Any=3 , __A : Optional[Any]=3_0 , __A : List[str]=4_0_0 , __A : str=True , __A : Optional[Any]=None , __A : Optional[int]=True , __A : int=[0.5, 0.5, 0.5] , __A : Dict=[0.5, 0.5, 0.5] , __A : Optional[int]=True , __A : int=1 / 2_5_5 , __A : List[str]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
snake_case__ : List[str] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
snake_case__ : Optional[Any] = parent
snake_case__ : str = batch_size
snake_case__ : Union[str, Any] = num_channels
snake_case__ : Optional[Any] = min_resolution
snake_case__ : List[str] = max_resolution
snake_case__ : Tuple = do_resize
snake_case__ : str = size
snake_case__ : str = do_normalize
snake_case__ : Optional[Any] = image_mean
snake_case__ : List[str] = image_std
snake_case__ : List[str] = do_rescale
snake_case__ : Tuple = rescale_factor
snake_case__ : Tuple = do_pad
def _lowercase ( self : str ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowercase ( self : Optional[Any] , __A : List[Any] , __A : List[Any]=False ):
if not batched:
snake_case__ : List[Any] = image_inputs[0]
if isinstance(__A , Image.Image ):
snake_case__, snake_case__ : str = image.size
else:
snake_case__, snake_case__ : Dict = image.shape[1], image.shape[2]
if w < h:
snake_case__ : Any = int(self.size["shortest_edge"] * h / w )
snake_case__ : Any = self.size["shortest_edge"]
elif w > h:
snake_case__ : Optional[int] = self.size["shortest_edge"]
snake_case__ : Any = int(self.size["shortest_edge"] * w / h )
else:
snake_case__ : Tuple = self.size["shortest_edge"]
snake_case__ : int = self.size["shortest_edge"]
else:
snake_case__ : Any = []
for image in image_inputs:
snake_case__, snake_case__ : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case__ : List[Any] = max(__A , key=lambda item : item[0] )[0]
snake_case__ : int = max(__A , key=lambda item : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = DeformableDetrImageProcessor if is_vision_available() else None
def _lowercase ( self : str ):
snake_case__ : Optional[Any] = DeformableDetrImageProcessingTester(self )
@property
def _lowercase ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : Tuple ):
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , "image_mean" ) )
self.assertTrue(hasattr(__A , "image_std" ) )
self.assertTrue(hasattr(__A , "do_normalize" ) )
self.assertTrue(hasattr(__A , "do_resize" ) )
self.assertTrue(hasattr(__A , "do_rescale" ) )
self.assertTrue(hasattr(__A , "do_pad" ) )
self.assertTrue(hasattr(__A , "size" ) )
def _lowercase ( self : Any ):
snake_case__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __A )
snake_case__ : Tuple = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__A )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
self.assertEqual(image_processor.do_pad , __A )
def _lowercase ( self : str ):
pass
def _lowercase ( self : List[str] ):
# Initialize image_processing
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : List[str] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__, snake_case__ : List[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : int ):
# Initialize image_processing
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
snake_case__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Union[str, Any] ):
# Initialize image_processing
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
snake_case__ : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowercase ( self : Optional[Any] ):
# prepare image and target
snake_case__ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case__ : Tuple = json.loads(f.read() )
snake_case__ : Union[str, Any] = {"image_id": 3_9_7_6_9, "annotations": target}
# encode them
snake_case__ : str = DeformableDetrImageProcessor()
snake_case__ : Tuple = image_processing(images=__A , annotations=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : str = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : Any = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : int = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify orig_size
snake_case__ : List[str] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : Tuple = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
@slow
def _lowercase ( self : Optional[int] ):
# prepare image, target and masks_path
snake_case__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case__ : Any = json.loads(f.read() )
snake_case__ : Dict = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
snake_case__ : int = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case__ : List[str] = DeformableDetrImageProcessor(format="coco_panoptic" )
snake_case__ : List[Any] = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : List[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Optional[int] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Any = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : Any = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : List[str] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : List[str] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify masks
snake_case__ : Union[str, Any] = 8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A )
# verify orig_size
snake_case__ : int = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : Union[str, Any] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
| 25 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__lowerCamelCase : Any = logging.get_logger(__name__)
__lowerCamelCase : str = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all MVP models at https://huggingface.co/models?filter=mvp
__lowerCamelCase : Dict = {
"""vocab_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json""",
},
"""added_tokens.json""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json""",
},
"""merges_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json""",
},
}
__lowerCamelCase : Any = {
"""RUCAIBox/mvp""": 1024,
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["input_ids", "attention_mask"]
a_ = MvpTokenizer
def __init__( self : Optional[int] , __A : Optional[int]=None , __A : int=None , __A : str=None , __A : List[str]="replace" , __A : Optional[int]="<s>" , __A : str="</s>" , __A : Optional[int]="</s>" , __A : Optional[int]="<s>" , __A : str="<unk>" , __A : int="<pad>" , __A : List[str]="<mask>" , __A : Dict=False , __A : int=True , **__A : List[str] , ):
super().__init__(
__A , __A , tokenizer_file=__A , errors=__A , bos_token=__A , eos_token=__A , sep_token=__A , cls_token=__A , unk_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , trim_offsets=__A , **__A , )
snake_case__ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __A ) != add_prefix_space:
snake_case__ : Dict = getattr(__A , pre_tok_state.pop("type" ) )
snake_case__ : Optional[Any] = add_prefix_space
snake_case__ : str = pre_tok_class(**__A )
snake_case__ : Tuple = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case__ : Optional[Any] = "post_processor"
snake_case__ : List[Any] = getattr(self.backend_tokenizer , __A , __A )
if tokenizer_component_instance:
snake_case__ : List[str] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
snake_case__ : Union[str, Any] = tuple(state["sep"] )
if "cls" in state:
snake_case__ : List[str] = tuple(state["cls"] )
snake_case__ : int = False
if state.get("add_prefix_space" , __A ) != add_prefix_space:
snake_case__ : Tuple = add_prefix_space
snake_case__ : Optional[Any] = True
if state.get("trim_offsets" , __A ) != trim_offsets:
snake_case__ : Dict = trim_offsets
snake_case__ : int = True
if changes_to_apply:
snake_case__ : int = getattr(__A , state.pop("type" ) )
snake_case__ : Optional[Any] = component_class(**__A )
setattr(self.backend_tokenizer , __A , __A )
@property
def _lowercase ( self : Optional[Any] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def _lowercase ( self : List[Any] , __A : Union[str, Any] ):
snake_case__ : Tuple = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else value
snake_case__ : str = value
def _lowercase ( self : Optional[Any] , *__A : str , **__A : Tuple ):
snake_case__ : Any = kwargs.get("is_split_into_words" , __A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*__A , **__A )
def _lowercase ( self : Tuple , *__A : Optional[int] , **__A : str ):
snake_case__ : Union[str, Any] = kwargs.get("is_split_into_words" , __A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._encode_plus(*__A , **__A )
def _lowercase ( self : Any , __A : str , __A : Optional[str] = None ):
snake_case__ : Optional[Any] = self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
def _lowercase ( self : Optional[int] , __A : Optional[int] , __A : int=None ):
snake_case__ : int = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _lowercase ( self : str , __A : List[int] , __A : Optional[List[int]] = None ):
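# MVP, like BART, does not make use of token type ids, so zeros are returned.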
snake_case__ : Optional[Any] = [self.sep_token_id]
snake_case__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 702 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
__lowerCamelCase : Tuple = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
__lowerCamelCase : Dict = {
"""allenai/led-base-16384""": 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Optional[int] = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
snake_case__ : Optional[int] = bs[:]
snake_case__ : Any = 0
for b in range(2**8 ):
if b not in bs:
bs.append(snake_case_ )
cs.append(2**8 + n )
n += 1
snake_case__ : Dict = [chr(snake_case_ ) for n in cs]
return dict(zip(snake_case_ , snake_case_ ) )
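# Illustrative note: printable bytes map to themselves, while the remaining
# bytes (spaces, control characters, etc.) are shifted to code points starting
# at 256, giving every byte a visible and reversible unicode character.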
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
snake_case__ : Dict = set()
snake_case__ : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
snake_case__ : List[Any] = char
return pairs
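# Illustrative note: for word = ("h", "e", "l", "l", "o") this yields
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}, the adjacent symbol pairs
# that the BPE loop below ranks and merges.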
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["input_ids", "attention_mask"]
def __init__( self : List[str] , __A : Any , __A : List[str] , __A : Optional[Any]="replace" , __A : Optional[int]="<s>" , __A : Union[str, Any]="</s>" , __A : Tuple="</s>" , __A : List[Any]="<s>" , __A : Dict="<unk>" , __A : Any="<pad>" , __A : Optional[int]="<mask>" , __A : List[str]=False , **__A : Union[str, Any] , ):
snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token
snake_case__ : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token
snake_case__ : Any = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token
snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token
snake_case__ : Tuple = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else unk_token
snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
snake_case__ : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
super().__init__(
errors=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , **__A , )
with open(__A , encoding="utf-8" ) as vocab_handle:
snake_case__ : Any = json.load(__A )
snake_case__ : Optional[Any] = {v: k for k, v in self.encoder.items()}
snake_case__ : Union[str, Any] = errors # how to handle errors in decoding
snake_case__ : Any = bytes_to_unicode()
snake_case__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(__A , encoding="utf-8" ) as merges_handle:
snake_case__ : str = merges_handle.read().split("\n" )[1:-1]
snake_case__ : int = [tuple(merge.split() ) for merge in bpe_merges]
snake_case__ : str = dict(zip(__A , range(len(__A ) ) ) )
snake_case__ : Optional[int] = {}
snake_case__ : Any = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
snake_case__ : Union[str, Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _lowercase ( self : List[Any] ):
return len(self.encoder )
def _lowercase ( self : Any ):
return dict(self.encoder , **self.added_tokens_encoder )
def _lowercase ( self : Optional[Any] , __A : Optional[int] ):
if token in self.cache:
return self.cache[token]
snake_case__ : Union[str, Any] = tuple(__A )
snake_case__ : List[Any] = get_pairs(__A )
if not pairs:
return token
while True:
snake_case__ : Tuple = min(__A , key=lambda __A : self.bpe_ranks.get(__A , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
snake_case__, snake_case__ : Dict = bigram
snake_case__ : str = []
snake_case__ : Union[str, Any] = 0
while i < len(__A ):
try:
snake_case__ : Dict = word.index(__A , __A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case__ : str = j
if word[i] == first and i < len(__A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case__ : str = tuple(__A )
snake_case__ : int = new_word
if len(__A ) == 1:
break
else:
snake_case__ : List[str] = get_pairs(__A )
snake_case__ : List[Any] = " ".join(__A )
snake_case__ : Optional[int] = word
return word
def _lowercase ( self : Optional[Any] , __A : Optional[Any] ):
snake_case__ : List[str] = []
for token in re.findall(self.pat , __A ):
snake_case__ : Dict = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__A ).split(" " ) )
return bpe_tokens
def _lowercase ( self : Union[str, Any] , __A : Optional[int] ):
return self.encoder.get(__A , self.encoder.get(self.unk_token ) )
def _lowercase ( self : Optional[int] , __A : Optional[Any] ):
return self.decoder.get(__A )
def _lowercase ( self : Union[str, Any] , __A : Dict ):
snake_case__ : Optional[Any] = "".join(__A )
snake_case__ : int = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _lowercase ( self : Optional[int] , __A : str , __A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : List[Any] = os.path.join(
__A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
snake_case__ : str = os.path.join(
__A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__A , ensure_ascii=__A ) + "\n" )
snake_case__ : str = 0
with open(__A , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
snake_case__ : int = token_index
writer.write(" ".join(__A ) + "\n" )
index += 1
return vocab_file, merge_file
def _lowercase ( self : int , __A : List[int] , __A : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ : Tuple = [self.cls_token_id]
snake_case__ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowercase ( self : Optional[Any] , __A : List[int] , __A : Optional[List[int]] = None , __A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
if token_ids_a is None:
return [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1]
def _lowercase ( self : List[Any] , __A : List[int] , __A : Optional[List[int]] = None ):
snake_case__ : Any = [self.sep_token_id]
snake_case__ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self : Optional[Any] , __A : int , __A : int=False , **__A : Dict ):
snake_case__ : Optional[int] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__A ) > 0 and not text[0].isspace()):
snake_case__ : Optional[int] = " " + text
return (text, kwargs)
def _lowercase ( self : Any , __A : Union[Dict[str, EncodedInput], BatchEncoding] , __A : Optional[int] = None , __A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __A : Optional[int] = None , __A : Optional[bool] = None , ):
snake_case__ : Optional[Any] = super()._pad(
encoded_inputs=__A , max_length=__A , padding_strategy=__A , pad_to_multiple_of=__A , return_attention_mask=__A , )
# Load from model defaults
if return_attention_mask is None:
snake_case__ : Union[str, Any] = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
snake_case__ : Union[str, Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
snake_case__ : Tuple = len(encoded_inputs["global_attention_mask"] ) != len(__A )
if needs_to_be_padded:
snake_case__ : int = len(__A ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
snake_case__ : int = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
snake_case__ : Tuple = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
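

# Minimal standalone sketch (not part of the tokenizer class above) of the
# `global_attention_mask` padding rule implemented in `_pad`: padded positions
# get -1 because 0 already means "local attention" rather than "ignore".
def pad_global_attention_mask(mask: list, target_length: int, padding_side: str = "right") -> list:
    difference = target_length - len(mask)
    if difference <= 0:
        return mask
    if padding_side == "right":
        return mask + [-1] * difference
    if padding_side == "left":
        return [-1] * difference + mask
    raise ValueError("Invalid padding strategy:" + str(padding_side))


# Example: global attention on the first token, padded to length 6.
# pad_global_attention_mask([1, 0, 0], 6) -> [1, 0, 0, -1, -1, -1]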
| 25 | 0 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
snake_case__ : List[Any] = torch.load(snake_case_ , map_location="cpu" )
if "model" in sd.keys():
snake_case__ : List[Any] = torch.load(snake_case_ , map_location="cpu" )["model"]
# pop unnecessary weights
snake_case__ : List[str] = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(snake_case_ )
snake_case__ : Union[str, Any] = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
snake_case__ : Optional[int] = sd.pop(snake_case_ )
snake_case__ : Any = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
snake_case__ : int = sd[key]
# We split QKV in separate Q,K,V
snake_case__ : List[Any] = key.replace(".qkv_proj." , ".q_proj." )
snake_case__ : Any = key.replace(".qkv_proj." , ".k_proj." )
snake_case__ : Optional[int] = key.replace(".qkv_proj." , ".v_proj." )
snake_case__ : int = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
snake_case__ : Dict = torch.split(snake_case_ , depth // 3 , dim=0 )
snake_case__ : Union[str, Any] = q
snake_case__ : Optional[Any] = k
snake_case__ : Union[str, Any] = v
del sd[key]
return sd
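

# Standalone sketch of the fused-QKV split above (tensor sizes are illustrative):
# a fused projection of shape (3 * hidden, hidden) is cut into three equal chunks
# along dim 0. Note the upstream comment above warning that the checkpoint may
# store the chunks in K, V, Q order rather than Q, K, V.
def split_fused_qkv(fused: torch.Tensor) -> tuple:
    depth = fused.shape[0]
    assert depth % 3 == 0
    return torch.split(fused, depth // 3, dim=0)


# Example: split_fused_qkv(torch.randn(12, 4)) -> three (4, 4) tensors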
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : List[Any] , snake_case_ : Any=None ):
snake_case__ : int = load_checkpoint(snake_case_ )
if config is not None:
snake_case__ : Tuple = OPTConfig.from_pretrained(snake_case_ )
else:
snake_case__ : Tuple = OPTConfig()
snake_case__ : Union[str, Any] = OPTModel(snake_case_ ).half().eval()
model.load_state_dict(snake_case_ )
# Check results
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
model.save_pretrained(snake_case_ )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 703 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
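

# Example invocation (relying on the --make-reports option read above, which
# pytest_addoption_shared registers):
#   pytest tests/ --make-reports=my_run
# pytest_terminal_summary_main then aggregates the per-test reports for that id.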
| 25 | 0 |
from math import ceil, sqrt


def solution(limit: int = 1000000) -> int:
    """Count the square laminae that can be formed with up to `limit` tiles
    (Project Euler 173)."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
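

# Cross-check (not part of the solution above): brute-force enumeration of the
# laminae. One with outer width o and hole width h (same parity, o - h >= 2)
# uses o**2 - h**2 tiles; for a budget of 100 tiles both functions give 41.
def brute_force(limit: int) -> int:
    count = 0
    for o in range(3, limit // 4 + 2):
        for h in range(o - 2, 0, -2):
            if o * o - h * h > limit:
                break
            count += 1
    return count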
if __name__ == "__main__":
print(f"{solution() = }")
| 704 |
def prefix_function(input_string: str) -> list:
    """KMP prefix function: result[i] is the length of the longest proper
    prefix of input_string[: i + 1] that is also its suffix."""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    return max(prefix_function(input_str))
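

# Worked example: prefix_function("aabaaab") == [0, 1, 0, 1, 2, 2, 3], so
# longest_prefix("aabaaab") == 3 (the prefix "aab" also ends the string).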
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 | 0 |
def get_demo_graph(index: int) -> dict[int, list[int]]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at: int, parent: int, bridges: list[tuple[int, int]], id_: int) -> None:
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
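

# Example: in demo graph 0 the triangle 0-1-2 and the cycle 5-6-7-8 contain no
# bridges, so compute_bridges(get_demo_graph(0)) should return the three edges
# [(3, 4), (2, 3), (2, 5)] (order follows the DFS post-order above).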
if __name__ == "__main__":
import doctest
doctest.testmod()
| 705 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__lowerCamelCase : Optional[int] = get_logger()
__lowerCamelCase : Optional[dict] = None
class SCREAMING_SNAKE_CASE__ ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
"""simple docstring"""
def __init__( self : Optional[Any] , __A : Dict=None , __A : List[str]=None , **__A : str ):
super().__init__(features=__A )
import jax
from jaxlib.xla_client import Device
if isinstance(__A , __A ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(__A )}, as `jaxlib.xla_extension.Device` '''
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
snake_case__ : List[Any] = device if isinstance(__A , __A ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
snake_case__ : Any = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
snake_case__ : str = str(jax.devices()[0] )
snake_case__ : str = jnp_array_kwargs
@staticmethod
def _lowercase ( ):
import jax
return {str(__A ): device for device in jax.devices()}
def _lowercase ( self : Optional[Any] , __A : str ):
import jax
import jax.numpy as jnp
if isinstance(__A , __A ) and column:
if all(
isinstance(__A , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__A , axis=0 )
return column
def _lowercase ( self : int , __A : Tuple ):
import jax
import jax.numpy as jnp
if isinstance(__A , (str, bytes, type(__A )) ):
return value
elif isinstance(__A , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
snake_case__ : Optional[int] = {}
if isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_x64:
    snake_case__ : Any = {"dtype": jnp.int64}
else:
    snake_case__ : Tuple = {"dtype": jnp.int32}
elif isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
snake_case__ : str = {"dtype": jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__A , PIL.Image.Image ):
snake_case__ : Optional[Any] = np.asarray(__A )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
snake_case__ : int = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__A , **{**default_dtype, **self.jnp_array_kwargs} )
def _lowercase ( self : Union[str, Any] , __A : Optional[int] ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__A , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__A , "__array__" ) and not isinstance(__A , jax.Array ):
snake_case__ : Union[str, Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__A , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] )
elif isinstance(__A , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] )
return self._tensorize(__A )
def _lowercase ( self : Tuple , __A : dict ):
return map_nested(self._recursive_tensorize , __A , map_list=__A )
def _lowercase ( self : Optional[int] , __A : pa.Table ):
snake_case__ : int = self.numpy_arrow_extractor().extract_row(__A )
snake_case__ : Tuple = self.python_features_decoder.decode_row(__A )
return self.recursive_tensorize(__A )
def _lowercase ( self : Optional[Any] , __A : pa.Table ):
snake_case__ : Any = self.numpy_arrow_extractor().extract_column(__A )
snake_case__ : Optional[int] = self.python_features_decoder.decode_column(__A , pa_table.column_names[0] )
snake_case__ : List[Any] = self.recursive_tensorize(__A )
snake_case__ : Dict = self._consolidate(__A )
return column
def _lowercase ( self : str , __A : pa.Table ):
snake_case__ : Any = self.numpy_arrow_extractor().extract_batch(__A )
snake_case__ : int = self.python_features_decoder.decode_batch(__A )
snake_case__ : List[Any] = self.recursive_tensorize(__A )
for column_name in batch:
snake_case__ : Any = self._consolidate(batch[column_name] )
return batch
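

# Minimal sketch (assumes jax is installed) of the consolidation rule used by
# `_consolidate` above: a column of equally shaped, equally typed arrays is
# stacked along a new batch axis; ragged columns stay as plain Python lists.
def stack_if_uniform(column: list):
    import jax.numpy as jnp

    if column and all(x.shape == column[0].shape and x.dtype == column[0].dtype for x in column):
        return jnp.stack(column, axis=0)
    return column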
| 25 | 0 |
from __future__ import annotations


def binary_search(a_list: list[int], item: int) -> bool:
    """Return True if `item` occurs in the sorted list `a_list`."""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
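

# Example (the input must already be sorted, as binary search requires):
# binary_search([1, 3, 5, 7, 9], 7) -> True
# binary_search([1, 3, 5, 7, 9], 4) -> False
# Each recursive call halves the range, so lookup takes O(log n) comparisons
# (ignoring the O(n) cost of the list slices).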
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
print(f"{target} was {not_str}found in {sequence}")
| 706 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCamelCase : Tuple = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Tuple = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Union[str, Any] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[Any] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 25 | 0 |
import gc
import threading
import time
import psutil
import torch
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : str ):
snake_case__ : List[str] = psutil.Process()
snake_case__ : int = False
def _lowercase ( self : int ):
snake_case__ : List[str] = -1
while True:
snake_case__ : List[Any] = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def _lowercase ( self : Tuple ):
snake_case__ : List[str] = True
snake_case__ : int = threading.Thread(target=self.peak_monitor )
snake_case__ : Dict = True
self.thread.start()
def _lowercase ( self : Optional[int] ):
snake_case__ : str = False
self.thread.join()
return self.cpu_memory_peak
__lowerCamelCase : Dict = PeakCPUMemory()
def SCREAMING_SNAKE_CASE ( ):
# Time
snake_case__ : int = {"time": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
snake_case__ : List[str] = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
snake_case__ : Dict = torch.cuda.memory_allocated(snake_case_ )
torch.cuda.reset_peak_memory_stats()
return measures
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
# Time
snake_case__ : Tuple = {"time": time.time() - start_measures["time"]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
snake_case__ : int = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
snake_case__ : Dict = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
snake_case__ : Dict = (torch.cuda.memory_allocated(snake_case_ ) - start_measures[str(snake_case_ )]) / 2**20
snake_case__ : Optional[Any] = (torch.cuda.max_memory_allocated(snake_case_ ) - start_measures[str(snake_case_ )]) / 2**20
return measures
def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Optional[int] ):
print(F'''{description}:''' )
print(F'''- Time: {measures['time']:.2f}s''' )
for i in range(torch.cuda.device_count() ):
print(F'''- GPU {i} allocated: {measures[str(snake_case_ )]:.2f}MiB''' )
snake_case__ : Tuple = measures[F'''{i}-peak''']
print(F'''- GPU {i} peak: {peak:.2f}MiB''' )
print(F'''- CPU RAM allocated: {measures['cpu']:.2f}MiB''' )
print(F'''- CPU RAM peak: {measures['cpu-peak']:.2f}MiB''' )
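

# Hypothetical usage of the three helpers above (assumed names start_measure,
# end_measure and log_measures, following the order they are defined in; the
# workload itself is made up):
#
#   measures = start_measure()
#   run_training_step()                 # any workload to profile
#   log_measures(end_measure(measures), "training step")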
| 707 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Tuple ):
snake_case__ : List[str] = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
snake_case__ : Tuple = get_activation("gelu" )
self.assertTrue(torch.allclose(gelu_python(__A ) , torch_builtin(__A ) ) )
self.assertFalse(torch.allclose(gelu_python(__A ) , gelu_new(__A ) ) )
def _lowercase ( self : Dict ):
snake_case__ : str = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
snake_case__ : Union[str, Any] = get_activation("gelu" )
snake_case__ : int = get_activation("gelu_10" )
snake_case__ : Optional[int] = torch_builtin(__A )
snake_case__ : Dict = geluaa(__A )
snake_case__ : Optional[Any] = torch.where(y_gelu_aa < 1_0.0 , 1 , 0 )
self.assertTrue(torch.max(__A ).item() == 1_0.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def _lowercase ( self : str ):
get_activation("gelu" )
get_activation("gelu_10" )
get_activation("gelu_fast" )
get_activation("gelu_new" )
get_activation("gelu_python" )
get_activation("gelu_pytorch_tanh" )
get_activation("linear" )
get_activation("mish" )
get_activation("quick_gelu" )
get_activation("relu" )
get_activation("sigmoid" )
get_activation("silu" )
get_activation("swish" )
get_activation("tanh" )
with self.assertRaises(__A ):
get_activation("bogus" )
with self.assertRaises(__A ):
get_activation(__A )
def _lowercase ( self : List[str] ):
snake_case__ : List[str] = get_activation("gelu" )
snake_case__ : Any = 1
snake_case__ : Union[str, Any] = get_activation("gelu" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(__A ):
snake_case__ : int = acta.a
| 25 | 0 |
def multiplicative_persistence(num: int) -> int:
    """Number of times the digits of `num` must be multiplied together until a
    single digit remains."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Number of times the digits of `num` must be summed until a single digit
    remains."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
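

# Examples:
# multiplicative_persistence(39) == 3   (39 -> 27 -> 14 -> 4)
# additive_persistence(199) == 3        (199 -> 19 -> 10 -> 1)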
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger(__name__)
__lowerCamelCase : int = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
__lowerCamelCase : Tuple = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Any , snake_case_ : Union[str, Any] ):
for attribute in key.split("." ):
snake_case__ : int = getattr(snake_case_ , snake_case_ )
if weight_type is not None:
snake_case__ : Optional[Any] = getattr(snake_case_ , snake_case_ ).shape
else:
snake_case__ : List[str] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
snake_case__ : str = value
elif weight_type == "weight_g":
snake_case__ : Union[str, Any] = value
elif weight_type == "weight_v":
snake_case__ : Optional[Any] = value
elif weight_type == "bias":
snake_case__ : str = value
else:
snake_case__ : Union[str, Any] = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Union[str, Any] ):
snake_case__ : str = []
snake_case__ : Optional[int] = fairseq_model.state_dict()
snake_case__ : int = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
snake_case__ : Dict = False
if "conv_layers" in name:
load_conv_layer(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , hf_model.config.feat_extract_norm == "group" , )
snake_case__ : str = True
else:
for key, mapped_key in MAPPING.items():
snake_case__ : Optional[int] = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
snake_case__ : int = True
if "*" in mapped_key:
snake_case__ : Any = name.split(snake_case_ )[0].split("." )[-2]
snake_case__ : Any = mapped_key.replace("*" , snake_case_ )
if "weight_g" in name:
snake_case__ : List[Any] = "weight_g"
elif "weight_v" in name:
snake_case__ : Optional[Any] = "weight_v"
elif "bias" in name:
snake_case__ : Optional[Any] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case__ : Optional[Any] = "weight"
else:
snake_case__ : Optional[Any] = None
set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
continue
if not is_used:
unused_weights.append(snake_case_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
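

# Standalone illustration (toy values) of the "*" substitution used above: the
# layer index is read out of the fairseq name and spliced into the HF template.
#   name = "encoder.layers.3.self_attn.k_proj.weight"
#   layer_index = name.split("self_attn.k_proj")[0].split(".")[-2]   # "3"
#   "encoder.layers.*.attention.k_proj".replace("*", layer_index)
#   # -> "encoder.layers.3.attention.k_proj"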
def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : str ):
snake_case__ : Tuple = full_name.split("conv_layers." )[-1]
snake_case__ : Union[str, Any] = name.split("." )
snake_case__ : str = int(items[0] )
snake_case__ : str = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
snake_case__ : Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
snake_case__ : Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
snake_case__ : Optional[Any] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
snake_case__ : int = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(snake_case_ )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : Any , snake_case_ : Optional[int]=None , snake_case_ : Optional[int]=None , snake_case_ : Any=True ):
if config_path is not None:
snake_case__ : Tuple = UniSpeechSatConfig.from_pretrained(snake_case_ )
else:
snake_case__ : Tuple = UniSpeechSatConfig()
snake_case__ : str = ""
if is_finetuned:
snake_case__ : Tuple = UniSpeechSatForCTC(snake_case_ )
else:
snake_case__ : Any = UniSpeechSatForPreTraining(snake_case_ )
snake_case__, snake_case__, snake_case__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
snake_case__ : Tuple = model[0].eval()
recursively_load_weights(snake_case_ , snake_case_ )
hf_wavavec.save_pretrained(snake_case_ )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 25 | 0 |
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : Optional[int] ):
snake_case__ : Optional[int] = sum(a_i[j] for j in range(snake_case_ , len(snake_case_ ) ) )
snake_case__ : Optional[Any] = sum(a_i[j] * base[j] for j in range(min(len(snake_case_ ) , snake_case_ ) ) )
snake_case__ : Tuple = 0, 0
snake_case__ : Tuple = n - i
snake_case__ : int = memo.get(snake_case_ )
if sub_memo is not None:
snake_case__ : Tuple = sub_memo.get(snake_case_ )
if jumps is not None and len(snake_case_ ) > 0:
# find and make the largest jump without going over
snake_case__ : Dict = -1
for _k in range(len(snake_case_ ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
snake_case__ : Union[str, Any] = _k
break
if max_jump >= 0:
snake_case__ : Any = jumps[max_jump]
# since the difference between jumps is cached, add c
snake_case__ : str = diff + c
for j in range(min(snake_case_ , len(snake_case_ ) ) ):
snake_case__ : List[Any] = divmod(snake_case_ , 10 )
if new_c > 0:
add(snake_case_ , snake_case_ , snake_case_ )
else:
snake_case__ : List[Any] = []
else:
snake_case__ : str = {c: []}
snake_case__ : str = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
snake_case__ : Any = next_term(snake_case_ , k - 1 , i + dn , snake_case_ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
snake_case__ : List[Any] = compute(snake_case_ , snake_case_ , i + dn , snake_case_ )
diff += _diff
dn += terms_jumped
snake_case__ : Optional[Any] = sub_memo[c]
# keep jumps sorted by # of terms skipped
snake_case__ : Optional[int] = 0
while j < len(snake_case_ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(snake_case_ , (diff, dn, k) )
return (diff, dn)
def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : Any , snake_case_ : str , snake_case_ : Optional[int] ):
if i >= n:
return 0, i
if k > len(snake_case_ ):
a_i.extend([0 for _ in range(k - len(snake_case_ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
snake_case__ : Tuple = i
snake_case__ : List[Any] = 0, 0, 0
for j in range(len(snake_case_ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
snake_case__ : str = ds_c + ds_b
diff += addend
snake_case__ : Any = 0
for j in range(snake_case_ ):
snake_case__ : Optional[int] = a_i[j] + addend
snake_case__ : str = divmod(snake_case_ , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(snake_case_ , snake_case_ , snake_case_ )
return diff, i - start_i
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : List[str] , snake_case_ : List[str] ):
for j in range(snake_case_ , len(snake_case_ ) ):
snake_case__ : int = digits[j] + addend
if s >= 10:
snake_case__ : List[Any] = divmod(snake_case_ , 10 )
snake_case__ : Optional[int] = addend // 10 + quotient
else:
snake_case__ : Optional[Any] = s
snake_case__ : Optional[Any] = addend // 10
if addend == 0:
break
while addend > 0:
snake_case__ : List[str] = divmod(snake_case_ , 10 )
digits.append(snake_case_ )
def SCREAMING_SNAKE_CASE ( snake_case_ : int = 10**15 ):
snake_case__ : List[Any] = [1]
snake_case__ : int = 1
snake_case__ : List[Any] = 0
while True:
snake_case__ : Optional[Any] = next_term(snake_case_ , 20 , i + dn , snake_case_ )
dn += terms_jumped
if dn == n - i:
break
snake_case__ : Union[str, Any] = 0
for j in range(len(snake_case_ ) ):
a_n += digits[j] * 10**j
return a_n
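

# Naive reference for small n (the optimized jump-table solution above computes
# the same sequence): a(1) = 1 and a(n + 1) = a(n) + digitsum(a(n)), giving
# 1, 2, 4, 8, 16, 23, 28, 38, 49, 62, ...
def naive(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a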
if __name__ == "__main__":
print(f"{solution() = }")
| 709 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : Dict=None , snake_case_ : Tuple=None , snake_case_ : List[str]=None , snake_case_ : List[str]=None , snake_case_ : List[str]=None , ):
if attention_mask is None:
snake_case__ : Any = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
snake_case__ : List[Any] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
snake_case__ : str = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=snake_case_ )
if decoder_head_mask is None:
snake_case__ : Optional[int] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case_ )
if cross_attn_head_mask is None:
snake_case__ : Union[str, Any] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case_ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : List[str] , __A : Any , __A : List[str]=1_3 , __A : List[Any]=7 , __A : Union[str, Any]=True , __A : Union[str, Any]=False , __A : str=9_9 , __A : Optional[Any]=1_6 , __A : Optional[Any]=2 , __A : Any=4 , __A : List[Any]=4 , __A : int="relu" , __A : Optional[int]=0.1 , __A : Tuple=0.1 , __A : Optional[int]=0.0 , __A : Optional[Any]=0.0 , __A : List[Any]=2_0 , __A : Optional[Any]=2 , __A : int=1 , __A : Union[str, Any]=0 , ):
snake_case__ : Optional[Any] = parent
snake_case__ : List[str] = batch_size
snake_case__ : Union[str, Any] = seq_length
snake_case__ : Optional[Any] = is_training
snake_case__ : List[str] = use_labels
snake_case__ : Tuple = vocab_size
snake_case__ : Optional[Any] = hidden_size
snake_case__ : Union[str, Any] = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Tuple = intermediate_size
snake_case__ : str = hidden_act
snake_case__ : Optional[Any] = hidden_dropout_prob
snake_case__ : int = attention_probs_dropout_prob
snake_case__ : int = encoder_layerdrop
snake_case__ : Tuple = decoder_layerdrop
snake_case__ : List[str] = max_position_embeddings
snake_case__ : Tuple = eos_token_id
snake_case__ : Dict = pad_token_id
snake_case__ : str = bos_token_id
def _lowercase ( self : Tuple ):
snake_case__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Union[str, Any] = self.eos_token_id # Eos Token
snake_case__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
snake_case__ : int = input_ids.clamp(self.pad_token_id + 1 )
snake_case__ : Optional[Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
snake_case__ : Union[str, Any] = self.get_config()
snake_case__ : Union[str, Any] = prepare_mam_aaa_inputs_dict(__A , __A , __A )
return config, inputs_dict
def _lowercase ( self : Dict ):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def _lowercase ( self : List[str] ):
snake_case__, snake_case__ : Any = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowercase ( self : Optional[Any] , __A : int , __A : Dict ):
snake_case__ : Union[str, Any] = MaMaaaModel(config=__A ).get_decoder().to(__A ).eval()
snake_case__ : List[Any] = inputs_dict["input_ids"]
snake_case__ : Optional[Any] = inputs_dict["attention_mask"]
snake_case__ : Union[str, Any] = inputs_dict["head_mask"]
# first forward pass
snake_case__ : Dict = model(__A , attention_mask=__A , head_mask=__A , use_cache=__A )
snake_case__, snake_case__ : Dict = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
snake_case__ : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ : List[str] = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
snake_case__ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case__ : List[Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
snake_case__ : Tuple = model(__A , attention_mask=__A )["last_hidden_state"]
snake_case__ : Tuple = model(__A , attention_mask=__A , past_key_values=__A )[
"last_hidden_state"
]
# select random slice
snake_case__ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A , __A , atol=1e-2 ) )
def _lowercase ( self : str , __A : Dict , __A : Optional[Any] ):
snake_case__ : Union[str, Any] = MaMaaaModel(config=__A ).to(__A ).eval()
snake_case__ : Union[str, Any] = model(**__A )
snake_case__ : Tuple = outputs.encoder_last_hidden_state
snake_case__ : Union[str, Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ : Dict = model.get_encoder()
encoder.save_pretrained(__A )
snake_case__ : Any = MaMaaaEncoder.from_pretrained(__A ).to(__A )
snake_case__ : List[str] = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ : Dict = model.get_decoder()
decoder.save_pretrained(__A )
snake_case__ : Optional[Any] = MaMaaaDecoder.from_pretrained(__A ).to(__A )
snake_case__ : List[str] = decoder(
input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=__A , encoder_attention_mask=inputs_dict["attention_mask"] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
a_ = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
a_ = (
{
"conversational": MaMaaaForConditionalGeneration,
"feature-extraction": MaMaaaModel,
"summarization": MaMaaaForConditionalGeneration,
"text2text-generation": MaMaaaForConditionalGeneration,
"translation": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
a_ = True
a_ = True
a_ = False
a_ = False
def _lowercase ( self : int , __A : Tuple , __A : Any , __A : Optional[Any] , __A : Optional[Any] , __A : Union[str, Any] ):
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def _lowercase ( self : Tuple ):
snake_case__ : Any = MaMaaaModelTester(self )
snake_case__ : Dict = ConfigTester(self , config_class=__A )
def _lowercase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def _lowercase ( self : Union[str, Any] ):
snake_case__, snake_case__ : int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
snake_case__ : int = model_class(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A )
snake_case__, snake_case__ : Optional[int] = model_class.from_pretrained(__A , output_loading_info=__A )
self.assertEqual(info["missing_keys"] , [] )
def _lowercase ( self : Dict ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__A )
def _lowercase ( self : Any ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__A )
def _lowercase ( self : Union[str, Any] ):
snake_case__, snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
snake_case__ : str = model_class(__A )
model.to(__A )
model.eval()
snake_case__ : str = copy.deepcopy(self._prepare_for_class(__A , __A ) )
if not self.is_encoder_decoder:
snake_case__ : Optional[Any] = inputs["input_ids"]
del inputs["input_ids"]
else:
snake_case__ : Union[str, Any] = inputs["input_ids"]
snake_case__ : List[str] = inputs.get("decoder_input_ids" , __A )
del inputs["input_ids"]
inputs.pop("decoder_input_ids" , __A )
snake_case__ : Tuple = model.get_input_embeddings()
if not self.is_encoder_decoder:
snake_case__ : List[Any] = wte(__A )
else:
snake_case__ : Any = wte(__A )
snake_case__ : Optional[int] = wte(__A )
with torch.no_grad():
model(**__A )[0]
def _lowercase ( self : Optional[Any] ):
snake_case__, snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
snake_case__ : Any = input_dict["input_ids"]
snake_case__ : int = input_ids.ne(1 ).to(__A )
snake_case__ : List[Any] = MaMaaaForConditionalGeneration(__A ).eval().to(__A )
if torch_device == "cuda":
model.half()
model.generate(__A , attention_mask=__A )
model.generate(num_beams=4 , do_sample=__A , early_stopping=__A , num_return_sequences=3 )
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
return torch.tensor(snake_case_ , dtype=torch.long , device=snake_case_ )
__lowerCamelCase : Optional[Any] = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : str ):
return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" )
def _lowercase ( self : Optional[int] ):
snake_case__ : List[str] = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(__A )
snake_case__ : Optional[Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
snake_case__ : str = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
snake_case__ : int = prepare_mam_aaa_inputs_dict(model.config , __A , __A )
with torch.no_grad():
snake_case__ : str = model(**__A )[0]
snake_case__ : Tuple = torch.Size((1, 1_1, 1_0_2_4) )
self.assertEqual(output.shape , __A )
# change to expected output here
snake_case__ : Optional[Any] = torch.tensor(
[[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=__A )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) )
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Union[str, Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__A )
# change to intended input
snake_case__ : Union[str, Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
snake_case__ : List[str] = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
snake_case__ : int = prepare_mam_aaa_inputs_dict(model.config , __A , __A )
with torch.no_grad():
snake_case__ : Union[str, Any] = model(**__A )[0]
snake_case__ : Tuple = torch.Size((1, 1_1, model.config.vocab_size) )
self.assertEqual(output.shape , __A )
# change to expected output here
snake_case__ : List[str] = torch.tensor(
[[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=__A )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) )
def _lowercase ( self : Optional[Any] ):
snake_case__ : List[Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__A )
snake_case__ : List[str] = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" )
snake_case__ : List[Any] = [
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
snake_case__ : str = tokenizer(__A , padding=__A , return_tensors="pt" )
snake_case__ : Tuple = model.generate(
input_ids=dct["input_ids"].to(__A ) , attention_mask=dct["attention_mask"].to(__A ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , )
snake_case__ : List[str] = [
"The NSA case highlights the total absence of intelligence debate",
"I think there are two levels of response from the French government.",
"When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
" communications in France.",
]
snake_case__ : Dict = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=__A , skip_special_tokens=__A )
assert generated == expected_en
| 25 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , __A : Optional[Any] , __A : Union[str, Any]=7 , __A : Any=3 , __A : List[str]=1_8 , __A : Any=3_0 , __A : Tuple=4_0_0 , __A : Any=True , __A : Tuple=None , __A : Optional[Any]=True , __A : str=None , __A : str=True , ):
snake_case__ : Optional[int] = size if size is not None else {"shortest_edge": 2_0}
snake_case__ : int = crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
snake_case__ : Union[str, Any] = parent
snake_case__ : Any = batch_size
snake_case__ : Dict = num_channels
snake_case__ : List[str] = image_size
snake_case__ : Optional[int] = min_resolution
snake_case__ : List[str] = max_resolution
snake_case__ : str = do_resize
snake_case__ : int = size
snake_case__ : Optional[int] = do_center_crop
snake_case__ : List[Any] = crop_size
snake_case__ : Optional[Any] = do_flip_channel_order
def _lowercase ( self : Union[str, Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = MobileViTImageProcessor if is_vision_available() else None
def _lowercase ( self : List[str] ):
snake_case__ : Dict = MobileViTImageProcessingTester(self )
@property
def _lowercase ( self : List[str] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : str ):
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , "do_resize" ) )
self.assertTrue(hasattr(__A , "size" ) )
self.assertTrue(hasattr(__A , "do_center_crop" ) )
self.assertTrue(hasattr(__A , "center_crop" ) )
self.assertTrue(hasattr(__A , "do_flip_channel_order" ) )
def _lowercase ( self : Optional[Any] ):
snake_case__ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 2_0} )
self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} )
snake_case__ : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
def _lowercase ( self : Union[str, Any] ):
pass
def _lowercase ( self : Optional[int] ):
# Initialize image_processing
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
snake_case__ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
snake_case__ : Optional[int] = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowercase ( self : str ):
# Initialize image_processing
snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
snake_case__ : Union[str, Any] = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowercase ( self : int ):
# Initialize image_processing
snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
snake_case__ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
snake_case__ : Dict = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 710 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : Union[str, Any] ):
snake_case__ : Optional[int] = []
for part_id in partition_order:
snake_case__ : List[Any] = df.where(F'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(snake_case_ ):
expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Tuple = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Union[str, Any] = spark.range(100 ).repartition(1 )
snake_case__ : Any = Spark(snake_case_ )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Dict = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Optional[Any] = spark.range(10 ).repartition(2 )
snake_case__ : Optional[Any] = [1, 0]
snake_case__ : Dict = _generate_iterable_examples(snake_case_ , snake_case_ ) # Reverse the partitions.
snake_case__ : Tuple = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , snake_case_ )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
snake_case__, snake_case__ : Tuple = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Optional[int] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Optional[int] = spark.range(10 ).repartition(1 )
snake_case__ : Union[str, Any] = SparkExamplesIterable(snake_case_ )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(snake_case_ ):
assert row_id == F'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Optional[int] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : str = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator" ) as generator_mock:
generator_mock.shuffle.side_effect = lambda x : x.reverse()
snake_case__ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [2, 1, 0] )
snake_case__ : List[Any] = SparkExamplesIterable(snake_case_ ).shuffle_data_sources(snake_case_ )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(snake_case_ ):
snake_case__, snake_case__ : Optional[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Any = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Tuple = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
snake_case__ : List[Any] = SparkExamplesIterable(snake_case_ ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
snake_case__ : List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [0, 2] )
for i, (row_id, row_dict) in enumerate(snake_case_ ):
snake_case__, snake_case__ : Optional[int] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
snake_case__ : Any = SparkExamplesIterable(snake_case_ ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
snake_case__ : List[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [1, 3] )
for i, (row_id, row_dict) in enumerate(snake_case_ ):
snake_case__, snake_case__ : Optional[Any] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Dict = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Tuple = spark.range(100 ).repartition(1 )
snake_case__ : Union[str, Any] = Spark(snake_case_ )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 25 | 0 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
def _lowercase ( self : int ):
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def _lowercase ( self : Optional[int] ):
snake_case__ : Optional[Any] = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
return Dataset.from_dict(__A )
def _lowercase ( self : int ):
snake_case__ : str = self._create_example_records()
snake_case__ : List[Any] = Dataset.from_list(__A )
self.assertListEqual(dset.column_names , ["col_1", "col_2"] )
for i, r in enumerate(__A ):
self.assertDictEqual(__A , example_records[i] )
def _lowercase ( self : Optional[Any] ):
snake_case__ : int = self._create_example_records()
snake_case__ : Dict = Dataset.from_list(__A )
snake_case__ : List[str] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def _lowercase ( self : List[str] ): # checks what happens with missing columns
snake_case__ : Union[str, Any] = [{"col_1": 1}, {"col_2": "x"}]
snake_case__ : Union[str, Any] = Dataset.from_list(__A )
self.assertDictEqual(dset[0] , {"col_1": 1} )
self.assertDictEqual(dset[1] , {"col_1": None} ) # NB: first record is used for columns
def _lowercase ( self : Union[str, Any] ): # checks if the type can be inferred from the second record
snake_case__ : List[Any] = [{"col_1": []}, {"col_1": [1, 2]}]
snake_case__ : int = Dataset.from_list(__A )
self.assertEqual(dset.info.features["col_1"] , Sequence(Value("int64" ) ) )
def _lowercase ( self : Any ):
snake_case__ : Tuple = Dataset.from_list([] )
self.assertEqual(len(__A ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 711 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase : List[str] = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
__lowerCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 25 | 0 |
import string
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
snake_case__ : Union[str, Any] = ""
for i in sequence:
snake_case__ : int = ord(snake_case_ )
if 65 <= extract <= 90:
output += chr(155 - extract )
elif 97 <= extract <= 122:
output += chr(219 - extract )
else:
output += i
return output
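# Why 155 and 219: the cipher maps each letter to its mirror, so the two
# ordinals must sum to a constant. For uppercase, ord("A") + ord("Z") = 65 + 90 = 155,
# hence chr(155 - extract); for lowercase, ord("a") + ord("z") = 97 + 122 = 219.
# A quick trace (illustrative, not part of the original module):
#   atbash_slow("Hello")  ->  "Svool"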
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
snake_case__ : str = string.ascii_letters
snake_case__ : Optional[int] = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(snake_case_ )] if c in letters else c for c in sequence )
def SCREAMING_SNAKE_CASE ( ):
from timeit import timeit
print("Running performance benchmarks..." )
snake_case__ : str = "from string import printable ; from __main__ import atbash, atbash_slow"
print(F'''> atbash_slow(): {timeit('atbash_slow(printable)' , setup=snake_case_ )} seconds''' )
print(F'''> atbash(): {timeit('atbash(printable)' , setup=snake_case_ )} seconds''' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f"{example} encrypted in atbash: {atbash(example)}")
benchmark()
| 712 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 25 | 0 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
__lowerCamelCase : Tuple = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
__lowerCamelCase : Dict = {
"""allenai/led-base-16384""": 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Optional[int] = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
snake_case__ : Optional[int] = bs[:]
snake_case__ : Any = 0
for b in range(2**8 ):
if b not in bs:
bs.append(snake_case_ )
cs.append(2**8 + n )
n += 1
snake_case__ : Dict = [chr(snake_case_ ) for n in cs]
return dict(zip(snake_case_ , snake_case_ ) )
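# A worked example of the mapping (illustrative; the values follow from the
# code above): printable bytes map to themselves, e.g. byte 33 -> "!", while
# non-printable bytes are shifted past 255. The space byte 32 is preceded by
# 32 missing bytes (0-31), so it maps to chr(2**8 + 32) = chr(288) = "Ġ":
#   >>> bytes_to_unicode()[32]
#   'Ġ'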
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
snake_case__ : Dict = set()
snake_case__ : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
snake_case__ : List[Any] = char
return pairs
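# Illustrative trace (not part of the original module): for the symbol tuple
# ("l", "o", "w"), the adjacent pairs are:
#   >>> get_pairs(("l", "o", "w"))
#   {('l', 'o'), ('o', 'w')}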
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["input_ids", "attention_mask"]
def __init__( self : List[str] , __A : Any , __A : List[str] , __A : Optional[Any]="replace" , __A : Optional[int]="<s>" , __A : Union[str, Any]="</s>" , __A : Tuple="</s>" , __A : List[Any]="<s>" , __A : Dict="<unk>" , __A : Any="<pad>" , __A : Optional[int]="<mask>" , __A : List[str]=False , **__A : Union[str, Any] , ):
snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token
snake_case__ : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token
snake_case__ : Any = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token
snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token
snake_case__ : Tuple = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else unk_token
snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token
# Mask token behaves like a normal word, i.e. it includes the space before it
snake_case__ : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
super().__init__(
errors=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , **__A , )
with open(__A , encoding="utf-8" ) as vocab_handle:
snake_case__ : Any = json.load(__A )
snake_case__ : Optional[Any] = {v: k for k, v in self.encoder.items()}
snake_case__ : Union[str, Any] = errors # how to handle errors in decoding
snake_case__ : Any = bytes_to_unicode()
snake_case__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(__A , encoding="utf-8" ) as merges_handle:
snake_case__ : str = merges_handle.read().split("\n" )[1:-1]
snake_case__ : int = [tuple(merge.split() ) for merge in bpe_merges]
snake_case__ : str = dict(zip(__A , range(len(__A ) ) ) )
snake_case__ : Optional[int] = {}
snake_case__ : Any = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
snake_case__ : Union[str, Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _lowercase ( self : List[Any] ):
return len(self.encoder )
def _lowercase ( self : Any ):
return dict(self.encoder , **self.added_tokens_encoder )
def _lowercase ( self : Optional[Any] , __A : Optional[int] ):
if token in self.cache:
return self.cache[token]
snake_case__ : Union[str, Any] = tuple(__A )
snake_case__ : List[Any] = get_pairs(__A )
if not pairs:
return token
while True:
snake_case__ : Tuple = min(__A , key=lambda __A : self.bpe_ranks.get(__A , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
snake_case__ : Dict = bigram
snake_case__ : str = []
snake_case__ : Union[str, Any] = 0
while i < len(__A ):
try:
snake_case__ : Dict = word.index(__A , __A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case__ : str = j
if word[i] == first and i < len(__A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case__ : str = tuple(__A )
snake_case__ : int = new_word
if len(__A ) == 1:
break
else:
snake_case__ : List[str] = get_pairs(__A )
snake_case__ : List[Any] = " ".join(__A )
snake_case__ : Optional[int] = word
return word
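# A hedged walk-through of the merge loop above (hypothetical ranks, not the
# real LED merges): with bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1}, the token
# "low" starts as ("l", "o", "w"); the best-ranked pair ("l", "o") merges first,
# giving ("lo", "w"); then ("lo", "w") merges, giving ("low",), and the loop
# stops because a single symbol remains. The cached result is "low".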
def _lowercase ( self : Optional[Any] , __A : Optional[Any] ):
snake_case__ : List[str] = []
for token in re.findall(self.pat , __A ):
snake_case__ : Dict = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__A ).split(" " ) )
return bpe_tokens
def _lowercase ( self : Union[str, Any] , __A : Optional[int] ):
return self.encoder.get(__A , self.encoder.get(self.unk_token ) )
def _lowercase ( self : Optional[int] , __A : Optional[Any] ):
return self.decoder.get(__A )
def _lowercase ( self : Union[str, Any] , __A : Dict ):
snake_case__ : Optional[Any] = "".join(__A )
snake_case__ : int = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _lowercase ( self : Optional[int] , __A : str , __A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : List[Any] = os.path.join(
__A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
snake_case__ : str = os.path.join(
__A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__A , ensure_ascii=__A ) + "\n" )
snake_case__ : str = 0
with open(__A , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
snake_case__ : int = token_index
writer.write(" ".join(__A ) + "\n" )
index += 1
return vocab_file, merge_file
def _lowercase ( self : int , __A : List[int] , __A : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ : Tuple = [self.cls_token_id]
snake_case__ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowercase ( self : Optional[Any] , __A : List[int] , __A : Optional[List[int]] = None , __A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
if token_ids_a is None:
return [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1]
def _lowercase ( self : List[Any] , __A : List[int] , __A : Optional[List[int]] = None ):
snake_case__ : Any = [self.sep_token_id]
snake_case__ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self : Optional[Any] , __A : int , __A : int=False , **__A : Dict ):
snake_case__ : Optional[int] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__A ) > 0 and not text[0].isspace()):
snake_case__ : Optional[int] = " " + text
return (text, kwargs)
def _lowercase ( self : Any , __A : Union[Dict[str, EncodedInput], BatchEncoding] , __A : Optional[int] = None , __A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __A : Optional[int] = None , __A : Optional[bool] = None , ):
snake_case__ : Optional[Any] = super()._pad(
encoded_inputs=__A , max_length=__A , padding_strategy=__A , pad_to_multiple_of=__A , return_attention_mask=__A , )
# Load from model defaults
if return_attention_mask is None:
snake_case__ : Union[str, Any] = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
snake_case__ : Union[str, Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as other (sequential) inputs.
snake_case__ : Tuple = len(encoded_inputs["global_attention_mask"] ) != len(__A )
if needs_to_be_padded:
snake_case__ : int = len(__A ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
snake_case__ : int = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
snake_case__ : Tuple = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
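# A small illustration of the branch above (hypothetical inputs): if padding
# grows a 3-token sequence to length 5 with padding_side == "right", then
# global_attention_mask [1, 0, 0] becomes [1, 0, 0, -1, -1]; with
# padding_side == "left" it becomes [-1, -1, 1, 0, 0]. The -1 entries mean
# "padding", distinct from 0, which already means "local attention".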
| 713 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def SCREAMING_SNAKE_CASE ( snake_case_ : dict ):
return (data["data"], data["target"])
def SCREAMING_SNAKE_CASE ( snake_case_ : np.ndarray , snake_case_ : np.ndarray ):
snake_case__ : Optional[int] = XGBClassifier()
classifier.fit(snake_case_ , snake_case_ )
return classifier
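# A minimal usage sketch (hypothetical toy data, not part of the original
# script): the returned classifier exposes the usual scikit-learn style API.
#
#   clf = xgboost(np.array([[0.0], [1.0], [2.0]]), np.array([0, 1, 1]))
#   clf.predict(np.array([[1.5]]))  # -> array of predicted class labels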
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Any = load_iris()
snake_case__, snake_case__ : str = data_handling(snake_case_ )
snake_case__, snake_case__, snake_case__, snake_case__ : int = train_test_split(
snake_case_ , snake_case_ , test_size=0.25 )
snake_case__ : Dict = iris["target_names"]
# Create an XGBoost Classifier from the training data
snake_case__ : Dict = xgboost(snake_case_ , snake_case_ )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
snake_case_ , snake_case_ , snake_case_ , display_labels=snake_case_ , cmap="Blues" , normalize="true" , )
plt.title("Normalized Confusion Matrix - IRIS Dataset" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 25 | 0 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
def _lowercase ( self : Any ):
snake_case__ : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__A , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(__A , "num_attention_heads" ) )
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Any , __A : Optional[int] , __A : Dict=1_3 , __A : Union[str, Any]=6_4 , __A : int=3 , __A : List[str]=3 , __A : Optional[Any]=2 , __A : Dict=1 , __A : Optional[int]=1_6 , __A : Any=[1_2_8, 2_5_6, 3_8_4] , __A : List[str]=[4, 6, 8] , __A : Optional[Any]=[2, 3, 4] , __A : str=[1_6, 1_6, 1_6] , __A : Dict=0 , __A : Dict=[2, 2, 2] , __A : Dict=[2, 2, 2] , __A : Dict=0.0_2 , __A : Dict=True , __A : Dict=True , __A : str=2 , ):
snake_case__ : Optional[Any] = parent
snake_case__ : Union[str, Any] = batch_size
snake_case__ : List[Any] = image_size
snake_case__ : Tuple = num_channels
snake_case__ : int = kernel_size
snake_case__ : Dict = stride
snake_case__ : Union[str, Any] = padding
snake_case__ : int = hidden_sizes
snake_case__ : List[str] = num_attention_heads
snake_case__ : int = depths
snake_case__ : Optional[Any] = key_dim
snake_case__ : Dict = drop_path_rate
snake_case__ : Union[str, Any] = patch_size
snake_case__ : str = attention_ratio
snake_case__ : Union[str, Any] = mlp_ratio
snake_case__ : Optional[Any] = initializer_range
snake_case__ : Optional[Any] = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
snake_case__ : Union[str, Any] = is_training
snake_case__ : Dict = use_labels
snake_case__ : List[Any] = num_labels
snake_case__ : Dict = initializer_range
def _lowercase ( self : Tuple ):
snake_case__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : Tuple = None
if self.use_labels:
snake_case__ : int = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ : Dict = self.get_config()
return config, pixel_values, labels
def _lowercase ( self : Dict ):
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def _lowercase ( self : Dict , __A : int , __A : List[str] , __A : List[str] ):
snake_case__ : Any = LevitModel(config=__A )
model.to(__A )
model.eval()
snake_case__ : Optional[int] = model(__A )
snake_case__ : Optional[int] = (self.image_size, self.image_size)
snake_case__, snake_case__ : Any = image_size[0], image_size[1]
for _ in range(4 ):
snake_case__ : List[str] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
snake_case__ : str = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
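# Worked arithmetic for the loop above with the tester defaults
# (image_size=64, kernel_size=3, stride=2, padding=1): each pass computes
# floor((h + 2*1 - 3) / 2 + 1), so the resolution shrinks 64 -> 32 -> 16 -> 8 -> 4
# over the four stride-2 convolutions, and the expected sequence length is
# ceil(4 / 4) * ceil(4 / 4) = 1.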
def _lowercase ( self : Dict , __A : Tuple , __A : Optional[int] , __A : Optional[Any] ):
snake_case__ : Tuple = self.num_labels
snake_case__ : Optional[Any] = LevitForImageClassification(__A )
model.to(__A )
model.eval()
snake_case__ : Dict = model(__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self : Optional[Any] ):
snake_case__ : Any = self.prepare_config_and_inputs()
snake_case__ : Dict = config_and_inputs
snake_case__ : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
a_ = (
{
"feature-extraction": LevitModel,
"image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
def _lowercase ( self : Any ):
snake_case__ : str = LevitModelTester(self )
snake_case__ : Optional[Any] = ConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=3_7 )
def _lowercase ( self : List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self : Optional[int] ):
return
@unittest.skip(reason="Levit does not use inputs_embeds" )
def _lowercase ( self : Dict ):
pass
@unittest.skip(reason="Levit does not support input and output embeddings" )
def _lowercase ( self : str ):
pass
@unittest.skip(reason="Levit does not output attentions" )
def _lowercase ( self : Optional[int] ):
pass
def _lowercase ( self : int ):
snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Optional[int] = model_class(__A )
snake_case__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Optional[int] = [*signature.parameters.keys()]
snake_case__ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __A )
def _lowercase ( self : Tuple ):
def check_hidden_states_output(__A : Any , __A : Dict , __A : Any ):
snake_case__ : int = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
snake_case__ : List[Any] = model(**self._prepare_for_class(__A , __A ) )
snake_case__ : Optional[Any] = outputs.hidden_states
snake_case__ : int = len(self.model_tester.depths ) + 1
self.assertEqual(len(__A ) , __A )
snake_case__ : Dict = (self.model_tester.image_size, self.model_tester.image_size)
snake_case__, snake_case__ : str = image_size[0], image_size[1]
for _ in range(4 ):
snake_case__ : Dict = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
snake_case__ : Union[str, Any] = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Any = True
check_hidden_states_output(__A , __A , __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : List[Any] = True
check_hidden_states_output(__A , __A , __A )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase ( self : Tuple ):
pass
def _lowercase ( self : List[str] , __A : str , __A : Optional[int] , __A : Optional[int]=False ):
snake_case__ : Optional[Any] = super()._prepare_for_class(__A , __A , return_labels=__A )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _lowercase ( self : List[str] ):
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def _lowercase ( self : Tuple ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def _lowercase ( self : List[Any] ):
if not self.model_tester.is_training:
return
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Union[str, Any] = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__A )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
snake_case__ : List[str] = model_class(__A )
model.to(__A )
model.train()
snake_case__ : Dict = self._prepare_for_class(__A , __A , return_labels=__A )
snake_case__ : Tuple = model(**__A ).loss
loss.backward()
def _lowercase ( self : Tuple ):
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
snake_case__ : str = False
snake_case__ : List[str] = True
for model_class in self.all_model_classes:
if model_class in get_values(__A ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
snake_case__ : Union[str, Any] = model_class(__A )
model.gradient_checkpointing_enable()
model.to(__A )
model.train()
snake_case__ : int = self._prepare_for_class(__A , __A , return_labels=__A )
snake_case__ : Tuple = model(**__A ).loss
loss.backward()
def _lowercase ( self : Dict ):
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Tuple = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__A ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f'''Testing {model_class} with {problem_type['title']}''' ):
snake_case__ : Optional[Any] = problem_type["title"]
snake_case__ : str = problem_type["num_labels"]
snake_case__ : int = model_class(__A )
model.to(__A )
model.train()
snake_case__ : Optional[int] = self._prepare_for_class(__A , __A , return_labels=__A )
if problem_type["num_labels"] > 1:
snake_case__ : Optional[int] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
snake_case__ : List[Any] = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__A ) as warning_list:
snake_case__ : List[str] = model(**__A ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def _lowercase ( self : List[str] ):
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : int = LevitModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : Dict ):
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _lowercase ( self : Optional[Any] ):
snake_case__ : Dict = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__A )
snake_case__ : Any = self.default_image_processor
snake_case__ : List[Any] = prepare_img()
snake_case__ : Optional[int] = image_processor(images=__A , return_tensors="pt" ).to(__A )
# forward pass
with torch.no_grad():
snake_case__ : Optional[int] = model(**__A )
# verify the logits
snake_case__ : Optional[int] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __A )
snake_case__ : Union[str, Any] = torch.tensor([1.0_4_4_8, -0.3_7_4_5, -1.8_3_1_7] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __A , atol=1e-4 ) )
| 714 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def SCREAMING_SNAKE_CASE ( snake_case_ : Dataset , snake_case_ : Dict[str, str] ):
snake_case__ : Tuple = args.log_outputs
snake_case__ : Union[str, Any] = "_".join(args.dataset.split("/" ) + [args.config, args.split] )
# load metric
snake_case__ : List[str] = load_metric("wer" )
snake_case__ : List[str] = load_metric("cer" )
# compute metrics
snake_case__ : List[Any] = wer.compute(references=result["target"] , predictions=result["prediction"] )
snake_case__ : List[str] = cer.compute(references=result["target"] , predictions=result["prediction"] )
# print & log results
snake_case__ : Dict = F'''WER: {wer_result}\nCER: {cer_result}'''
print(snake_case_ )
with open(F'''{dataset_id}_eval_results.txt''' , "w" ) as f:
f.write(snake_case_ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
snake_case__ : Union[str, Any] = F'''log_{dataset_id}_predictions.txt'''
snake_case__ : int = F'''log_{dataset_id}_targets.txt'''
with open(snake_case_ , "w" ) as p, open(snake_case_ , "w" ) as t:
# mapping function to write output
def write_to_file(snake_case_ : Union[str, Any] , snake_case_ : Any ):
p.write(F'''{i}''' + "\n" )
p.write(batch["prediction"] + "\n" )
t.write(F'''{i}''' + "\n" )
t.write(batch["target"] + "\n" )
result.map(snake_case_ , with_indices=snake_case_ )
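# For reference, WER is word-level edit distance divided by the number of
# reference words (CER is the same at character level). A worked example
# (illustrative, not taken from the script): reference "the cat sat on the mat"
# versus prediction "the cat sit on mat" needs one substitution (sat -> sit)
# and one deletion (the), so WER = 2 / 6 ≈ 0.33.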
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
snake_case__ : List[Any] = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
snake_case__ : Optional[int] = re.sub(snake_case_ , "" , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
snake_case__ : Optional[Any] = ["\n\n", "\n", " ", " "]
for t in token_sequences_to_ignore:
snake_case__ : Optional[int] = " ".join(text.split(snake_case_ ) )
return text
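# An illustrative trace (not part of the original script): the regex strips
# punctuation, the text is lowercased, and the whitespace passes collapse
# newlines, so normalize_text("Hello, World!\n\nNew line") returns
# "hello world new line".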
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
# load dataset
snake_case__ : int = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case_ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
snake_case__ : List[str] = AutoFeatureExtractor.from_pretrained(args.model_id )
snake_case__ : List[Any] = feature_extractor.sampling_rate
# resample audio
snake_case__ : Dict = dataset.cast_column("audio" , Audio(sampling_rate=snake_case_ ) )
# load eval pipeline
if args.device is None:
snake_case__ : int = 0 if torch.cuda.is_available() else -1
snake_case__ : List[str] = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(snake_case_ : Any ):
snake_case__ : Union[str, Any] = asr(
batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
snake_case__ : Optional[int] = prediction["text"]
snake_case__ : Optional[Any] = normalize_text(batch["sentence"] )
return batch
# run inference on all examples
snake_case__ : Any = dataset.map(snake_case_ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(snake_case_ , snake_case_ )
if __name__ == "__main__":
__lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
__lowerCamelCase : str = parser.parse_args()
main(args)
| 25 | 0 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase : Any = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all BART models at https://huggingface.co/models?filter=bart
__lowerCamelCase : Optional[Any] = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
__lowerCamelCase : List[Any] = {
"""facebook/bart-base""": 1024,
"""facebook/bart-large""": 1024,
"""facebook/bart-large-mnli""": 1024,
"""facebook/bart-large-cnn""": 1024,
"""facebook/bart-large-xsum""": 1024,
"""yjernite/bart_eli5""": 1024,
}
@lru_cache()
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Dict = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
snake_case__ : Dict = bs[:]
snake_case__ : Dict = 0
for b in range(2**8 ):
if b not in bs:
bs.append(snake_case_ )
cs.append(2**8 + n )
n += 1
snake_case__ : Tuple = [chr(snake_case_ ) for n in cs]
return dict(zip(snake_case_ , snake_case_ ) )
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
snake_case__ : Optional[int] = set()
snake_case__ : List[str] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
snake_case__ : Union[str, Any] = char
return pairs
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["input_ids", "attention_mask"]
def __init__( self : int , __A : Tuple , __A : Tuple , __A : Optional[int]="replace" , __A : Union[str, Any]="<s>" , __A : str="</s>" , __A : List[Any]="</s>" , __A : Union[str, Any]="<s>" , __A : Optional[int]="<unk>" , __A : Optional[int]="<pad>" , __A : Optional[Any]="<mask>" , __A : Optional[Any]=False , **__A : List[Any] , ):
snake_case__ : Any = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token
snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token
snake_case__ : Union[str, Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token
snake_case__ : int = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token
snake_case__ : Dict = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else unk_token
snake_case__ : int = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token
# Mask token behaves like a normal word, i.e. it includes the space before it
snake_case__ : int = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
super().__init__(
errors=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , **__A , )
with open(__A , encoding="utf-8" ) as vocab_handle:
snake_case__ : int = json.load(__A )
snake_case__ : Optional[Any] = {v: k for k, v in self.encoder.items()}
snake_case__ : Any = errors # how to handle errors in decoding
snake_case__ : List[Any] = bytes_to_unicode()
snake_case__ : Optional[int] = {v: k for k, v in self.byte_encoder.items()}
with open(__A , encoding="utf-8" ) as merges_handle:
snake_case__ : List[Any] = merges_handle.read().split("\n" )[1:-1]
snake_case__ : int = [tuple(merge.split() ) for merge in bpe_merges]
snake_case__ : int = dict(zip(__A , range(len(__A ) ) ) )
snake_case__ : str = {}
snake_case__ : Dict = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
snake_case__ : Any = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def _lowercase ( self : List[Any] ):
return len(self.encoder )
def _lowercase ( self : Optional[Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def _lowercase ( self : Dict , __A : List[Any] ):
if token in self.cache:
return self.cache[token]
snake_case__ : int = tuple(__A )
snake_case__ : Union[str, Any] = get_pairs(__A )
if not pairs:
return token
while True:
snake_case__ : Any = min(__A , key=lambda __A : self.bpe_ranks.get(__A , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
snake_case__ : int = bigram
snake_case__ : List[Any] = []
snake_case__ : Dict = 0
while i < len(__A ):
try:
snake_case__ : int = word.index(__A , __A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case__ : List[Any] = j
if word[i] == first and i < len(__A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case__ : Optional[Any] = tuple(__A )
snake_case__ : int = new_word
if len(__A ) == 1:
break
else:
snake_case__ : Tuple = get_pairs(__A )
snake_case__ : Tuple = " ".join(__A )
snake_case__ : Optional[Any] = word
return word
def _lowercase ( self : Union[str, Any] , __A : Tuple ):
snake_case__ : int = []
for token in re.findall(self.pat , __A ):
snake_case__ : int = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__A ).split(" " ) )
return bpe_tokens
def _lowercase ( self : Union[str, Any] , __A : Tuple ):
return self.encoder.get(__A , self.encoder.get(self.unk_token ) )
def _lowercase ( self : Optional[int] , __A : int ):
return self.decoder.get(__A )
def _lowercase ( self : Optional[Any] , __A : int ):
snake_case__ : Tuple = "".join(__A )
snake_case__ : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _lowercase ( self : int , __A : str , __A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : Optional[Any] = os.path.join(
__A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
snake_case__ : List[Any] = os.path.join(
__A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__A , ensure_ascii=__A ) + "\n" )
snake_case__ : Any = 0
with open(__A , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
snake_case__ : List[Any] = token_index
writer.write(" ".join(__A ) + "\n" )
index += 1
return vocab_file, merge_file
def _lowercase ( self : List[str] , __A : List[int] , __A : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ : int = [self.cls_token_id]
snake_case__ : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowercase ( self : Optional[Any] , __A : List[int] , __A : Optional[List[int]] = None , __A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
if token_ids_a is None:
return [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1]
def _lowercase ( self : Any , __A : List[int] , __A : Optional[List[int]] = None ):
snake_case__ : Optional[Any] = [self.sep_token_id]
snake_case__ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self : Tuple , __A : Dict , __A : Any=False , **__A : Union[str, Any] ):
snake_case__ : Any = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__A ) > 0 and not text[0].isspace()):
snake_case__ : List[Any] = " " + text
return (text, kwargs)
| 715 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase_ )
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
a_ = Features({"text": Value("string" )} )
a_ = Features({"labels": ClassLabel} )
a_ = "text"
a_ = "labels"
def _lowercase ( self : Tuple , __A : List[Any] ):
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , __A ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
snake_case__ : Any = copy.deepcopy(self )
snake_case__ : Optional[Any] = self.label_schema.copy()
snake_case__ : List[str] = features[self.label_column]
snake_case__ : Dict = label_schema
return task_template
@property
def _lowercase ( self : Tuple ):
return {
self.text_column: "text",
self.label_column: "labels",
}
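# A hedged usage sketch (assuming the upstream `datasets` names for the
# obfuscated identifiers above: this template is `TextClassification` and the
# method above corresponds to `align_with_features`):
#
#   features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   template = TextClassification(text_column="text", label_column="labels")
#   template = template.align_with_features(features)  # copies the ClassLabel into label_schema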
| 25 | 0 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( snake_case_ : list ):
if len(snake_case_ ) == 0:
return []
snake_case__, snake_case__ : Union[str, Any] = min(snake_case_ ), max(snake_case_ )
snake_case__ : Dict = int(max_value - min_value ) + 1
snake_case__ : list[list] = [[] for _ in range(snake_case_ )]
for i in my_list:
buckets[int(i - min_value )].append(snake_case_ )
return [v for bucket in buckets for v in sorted(snake_case_ )]
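# Worked example (illustrative): for [4, 5, 3, 2, 1], min_value = 1 and
# max_value = 5, so int(5 - 1) + 1 = 5 buckets are created and element v lands
# in bucket int(v - 1); sorting each bucket and concatenating yields [1, 2, 3, 4, 5].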
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 716 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class InstructBlipVisionConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "instructblip_vision_model"
    def __init__( self , hidden_size=1_4_0_8 , intermediate_size=6_1_4_4 , num_hidden_layers=3_9 , num_attention_heads=1_6 , image_size=2_2_4 , patch_size=1_4 , hidden_act="gelu" , layer_norm_eps=1e-6 , attention_dropout=0.0 , initializer_range=1e-1_0 , qkv_bias=True , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type" ) == "instructblip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class InstructBlipQFormerConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "instructblip_qformer"
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , initializer_range=0.0_2 , layer_norm_eps=1e-1_2 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=1_4_0_8 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type" ) == "instructblip":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class InstructBlipConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "instructblip"
    is_composition = True
    def __init__( self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=3_2 , **kwargs ):
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
        self.vision_config = InstructBlipVisionConfig(**vision_config )
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config )
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.0_2
    @classmethod
    def from_vision_qformer_text_configs( cls , vision_config : InstructBlipVisionConfig , qformer_config : InstructBlipQFormerConfig , text_config : PretrainedConfig , **kwargs , ):
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
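# Usage sketch for the composite config above (values follow the defaults):
# config = InstructBlipConfig()  # sub-configs fall back to defaults, text model to OPT
# config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size  # True (1408)
# config.to_dict()["model_type"]  # "instructblip"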
| 25 | 0 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : int = logging.get_logger(__name__)
__lowerCamelCase : Dict = {
"""kakaobrain/align-base""": """https://huggingface.co/kakaobrain/align-base/resolve/main/config.json""",
}
class AlignTextConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "align_text_model"
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-1_2 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type" ) == "align":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class AlignVisionConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "align_vision_model"
    def __init__( self , num_channels : int = 3 , image_size : int = 6_0_0 , width_coefficient : float = 2.0 , depth_coefficient : float = 3.1 , depth_divisor : int = 8 , kernel_sizes : List[int] = [3, 3, 5, 3, 5, 5, 3] , in_channels : List[int] = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , out_channels : List[int] = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , depthwise_padding : List[int] = [] , strides : List[int] = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats : List[int] = [1, 2, 2, 3, 3, 4, 1] , expand_ratios : List[int] = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio : float = 0.2_5 , hidden_act : str = "swish" , hidden_dim : int = 2_5_6_0 , pooling_type : str = "mean" , initializer_range : float = 0.0_2 , batch_norm_eps : float = 0.0_0_1 , batch_norm_momentum : float = 0.9_9 , drop_connect_rate : float = 0.2 , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats ) * 4
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type" ) == "align":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class AlignConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "align"
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , projection_dim=6_4_0 , temperature_init_value=1.0 , initializer_range=0.0_2 , **kwargs , ):
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values." )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values." )
        self.text_config = AlignTextConfig(**text_config )
        self.vision_config = AlignVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range
    @classmethod
    def from_text_vision_configs( cls , text_config : AlignTextConfig , vision_config : AlignVisionConfig , **kwargs ):
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
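# Usage sketch: compose the two sub-configs above into an AlignConfig and
# round-trip it through to_dict(); the values shown follow the defaults above.
# config = AlignConfig.from_text_vision_configs(AlignTextConfig() , AlignVisionConfig() )
# config.projection_dim  # 640
# config.to_dict()["text_config"]["hidden_size"]  # 768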
| 717 |
def gnome_sort( lst : list ):
    if len(lst ) <= 1:
        return lst
    i = 1
    while i < len(lst ):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
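# Worked trace of the single-index walk on [3, 1, 2]:
#   i=1: 3 > 1 -> swap -> [1, 3, 2], i=0 -> reset to i=1
#   i=1: 1 <= 3 -> i=2;  i=2: 3 > 2 -> swap -> [1, 2, 3], i=1
#   i=1: 1 <= 2 -> i=2;  i=2: 2 <= 3 -> i=3 == len(lst) -> done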
if __name__ == "__main__":
__lowerCamelCase : Dict = input("""Enter numbers separated by a comma:\n""").strip()
__lowerCamelCase : Tuple = [int(item) for item in user_input.split(""",""")]
print(gnome_sort(unsorted))
| 25 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
__lowerCamelCase : int = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    # Creates a nested list of random floats with the given 2-D shape.
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
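# Example: floats_list((2, 3)) yields a 2x3 nested list of floats in
# [0, scale), drawn from the module-level `global_rng` so the tests below can
# produce inputs of controlled shape and magnitude.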
class TvltFeatureExtractionTester( unittest.TestCase ):
    """simple docstring"""
def __init__( self : List[str] , __A : Any , __A : int=7 , __A : Any=4_0_0 , __A : Tuple=2_0_0_0 , __A : Tuple=2_0_4_8 , __A : List[str]=1_2_8 , __A : Dict=1 , __A : int=5_1_2 , __A : List[str]=3_0 , __A : List[Any]=4_4_1_0_0 , ):
snake_case__ : str = parent
snake_case__ : Optional[Any] = batch_size
snake_case__ : Optional[Any] = min_seq_length
snake_case__ : Optional[int] = max_seq_length
snake_case__ : List[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
snake_case__ : List[str] = spectrogram_length
snake_case__ : Union[str, Any] = feature_size
snake_case__ : int = num_audio_channels
snake_case__ : Tuple = hop_length
snake_case__ : str = chunk_length
snake_case__ : Optional[int] = sampling_rate
    def prepare_feat_extract_dict( self ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def _lowercase ( self : int , __A : Tuple=False , __A : int=False ):
def _flatten(__A : Union[str, Any] ):
return list(itertools.chain(*__A ) )
if equal_length:
snake_case__ : Any = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
snake_case__ : Optional[Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
snake_case__ : List[str] = [np.asarray(__A ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = TvltFeatureExtractor
    def setUp( self ):
        self.feat_extract_tester = TvltFeatureExtractionTester(self )
    def test_feat_extract_properties( self ):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(feature_extractor , "spectrogram_length" ) )
        self.assertTrue(hasattr(feature_extractor , "feature_size" ) )
        self.assertTrue(hasattr(feature_extractor , "num_audio_channels" ) )
        self.assertTrue(hasattr(feature_extractor , "hop_length" ) )
        self.assertTrue(hasattr(feature_extractor , "chunk_length" ) )
        self.assertTrue(hasattr(feature_extractor , "sampling_rate" ) )
    def test_feat_extract_from_and_save_pretrained( self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters" )
        mel_2 = dict_second.pop("mel_filters" )
        self.assertTrue(np.allclose(mel_1 , mel_2 ) )
        self.assertEqual(dict_first , dict_second )
    def test_feat_extract_to_json_file( self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , "feat_extract.json" )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters" )
        mel_2 = dict_second.pop("mel_filters" )
        self.assertTrue(np.allclose(mel_1 , mel_2 ) )
        self.assertEqual(dict_first , dict_second )
def _lowercase ( self : List[str] ):
# Initialize feature_extractor
snake_case__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
snake_case__ : Dict = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
snake_case__ : Union[str, Any] = [np.asarray(__A ) for speech_input in speech_inputs]
# Test not batched input
snake_case__ : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors="np" , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
snake_case__ : Dict = feature_extractor(__A , return_tensors="np" , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
snake_case__ : Union[str, Any] = feature_extractor(
__A , return_tensors="np" , sampling_rate=4_4_1_0_0 , mask_audio=__A ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
snake_case__ : Any = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
snake_case__ : str = np.asarray(__A )
snake_case__ : Tuple = feature_extractor(__A , return_tensors="np" , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def _load_datasamples( self , num_samples ):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("id" ).select(range(num_samples ) )[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration( self ):
        input_speech = self._load_datasamples(1 )
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech , return_tensors="pt" ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 1_9_2, 1_2_8) )
        expected_slice = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] )
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , expected_slice , atol=1e-4 ) )
| 718 |
from __future__ import annotations
import time
Path = list[tuple[int, int]]
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    """simple docstring"""
    def __init__( self , pos_x : int , pos_y : int , goal_x : int , goal_y : int , parent : Node | None ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    """simple docstring"""
    def __init__( self , start : tuple[int, int] , goal : tuple[int, int] ):
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , None )
        self.node_queue = [self.start]
        self.reached = False
    def search( self ) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0 )
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node )
            successors = self.get_successors(current_node )
            for node in successors:
                self.node_queue.append(node )
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors( self , parent : Node ) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent ) )
        return successors
    def retrace_path( self , node : Node | None ) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    """simple docstring"""
    def __init__( self , start , goal ):
        self.fwd_bfs = BreadthFirstSearch(start , goal )
        self.bwd_bfs = BreadthFirstSearch(goal , start )
        self.reached = False
    def search( self ) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0 )
            current_bwd_node = self.bwd_bfs.node_queue.pop(0 )
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node ),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node ),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node )
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None
    def retrace_bidirectional_path( self , fwd_node : Node , bwd_node : Node ) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node )
        bwd_path = self.bwd_bfs.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid ) - 1, len(grid[0] ) - 1)
    for elem in grid:
        print(elem )
    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init , goal )
    bfs_path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : " , bfs_time )
    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init , goal )
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : " , bd_bfs_time )
| 25 | 0 |
def encrypt( input_string : str , key : int ) -> str:
    temp_grid: list[list[str]] = [[] for _ in range(key )]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative" )
    if key == 1 or len(input_string ) <= key:
        return input_string
    for position, character in enumerate(input_string ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append(character )
    grid = ["".join(row ) for row in temp_grid]
    output_string = "".join(grid )
    return output_string
def decrypt( input_string : str , key : int ) -> str:
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative" )
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key )]  # generates template
    for position in range(len(input_string ) ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append("*" )
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row )]
        grid.append(list(splice ) )
        counter += len(splice )
    output_string = ""  # reads as zigzag
    for position in range(len(input_string ) ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0 )
    return output_string
def bruteforce( input_string : str ) -> dict[int, str]:
    results = {}
    for key_guess in range(1 , len(input_string ) ):  # tries every key
        results[key_guess] = decrypt(input_string , key_guess )
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
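# Roundtrip sketch for the three functions above (the ciphertext was worked
# out by hand from the zigzag pattern):
# encrypt("Hello World", 4)     == "HWe olordll"
# decrypt("HWe olordll", 4)     == "Hello World"
# bruteforce("HWe olordll")[4]  == "Hello World"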
| 719 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester( unittest.TestCase ):
    """simple docstring"""
def __init__( self : List[Any] , __A : Dict , __A : int=7 , __A : Optional[Any]=3 , __A : List[str]=3_0 , __A : List[Any]=4_0_0 , __A : Union[str, Any]=True , __A : List[Any]=None , __A : Optional[Any]=True , __A : Tuple=[0.5, 0.5, 0.5] , __A : Union[str, Any]=[0.5, 0.5, 0.5] , __A : List[str]=True , __A : Any=1 / 2_5_5 , __A : Optional[int]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
snake_case__ : List[str] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
snake_case__ : Dict = parent
snake_case__ : Optional[int] = batch_size
snake_case__ : Union[str, Any] = num_channels
snake_case__ : str = min_resolution
snake_case__ : Tuple = max_resolution
snake_case__ : List[Any] = do_resize
snake_case__ : Dict = size
snake_case__ : List[str] = do_normalize
snake_case__ : Optional[int] = image_mean
snake_case__ : Optional[int] = image_std
snake_case__ : Any = do_rescale
snake_case__ : Optional[int] = rescale_factor
snake_case__ : int = do_pad
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w )
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h )
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = ConditionalDetrImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : Any ):
snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , "image_mean" ) )
self.assertTrue(hasattr(__A , "image_std" ) )
self.assertTrue(hasattr(__A , "do_normalize" ) )
self.assertTrue(hasattr(__A , "do_resize" ) )
self.assertTrue(hasattr(__A , "size" ) )
def _lowercase ( self : List[str] ):
snake_case__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __A )
snake_case__ : Any = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__A )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
self.assertEqual(image_processor.do_pad , __A )
def _lowercase ( self : Union[str, Any] ):
pass
def _lowercase ( self : List[str] ):
# Initialize image_processing
snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
snake_case__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__, snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A )
snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Tuple ):
# Initialize image_processing
snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
snake_case__ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Dict = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Optional[Any] = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : str = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Tuple ):
# Initialize image_processing
snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Dict = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowercase ( self : List[Any] ):
# prepare image and target
snake_case__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case__ : Union[str, Any] = json.loads(f.read() )
snake_case__ : Optional[Any] = {"image_id": 3_9_7_6_9, "annotations": target}
# encode them
snake_case__ : Tuple = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
snake_case__ : int = image_processing(images=__A , annotations=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : str = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Tuple = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : Optional[int] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : str = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : Optional[int] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify orig_size
snake_case__ : Dict = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : List[str] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
@slow
def _lowercase ( self : str ):
# prepare image, target and masks_path
snake_case__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case__ : int = json.loads(f.read() )
snake_case__ : Optional[int] = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
snake_case__ : Optional[Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case__ : Optional[int] = ConditionalDetrImageProcessor(format="coco_panoptic" )
snake_case__ : Tuple = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Dict = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : int = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : str = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : Optional[Any] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify masks
snake_case__ : str = 8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A )
# verify orig_size
snake_case__ : int = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : List[Any] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
| 25 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {
"""microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
),
"""microsoft/deberta-v2-xxlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
),
}
class DebertaV2Config( PretrainedConfig ):
    """simple docstring"""
    model_type = "deberta-v2"
    def __init__( self , vocab_size=1_2_8_1_0_0 , hidden_size=1_5_3_6 , num_hidden_layers=2_4 , num_attention_heads=2_4 , intermediate_size=6_1_4_4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=0 , initializer_range=0.0_2 , layer_norm_eps=1e-7 , relative_attention=False , max_relative_positions=-1 , pad_token_id=0 , position_biased_input=True , pos_att_type=None , pooler_dropout=0 , pooler_hidden_act="gelu" , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility
        if type(pos_att_type ) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|" )]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size" , hidden_size )
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig( OnnxConfig ):
    """simple docstring"""
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)] )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)] )
    @property
    def default_onnx_opset( self ) -> int:
        return 1_2
    def generate_dummy_inputs( self , preprocessor : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , batch_size : int = -1 , seq_length : int = -1 , num_choices : int = -1 , is_pair : bool = False , framework : Optional["TensorType"] = None , num_channels : int = 3 , image_width : int = 4_0 , image_height : int = 4_0 , tokenizer : "PreTrainedTokenizerBase" = None , ):
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor , framework=framework )
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
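# Sketch of how an OnnxConfig like the one above is typically consumed; this
# assumes the upstream transformers.onnx export API, and the model, tokenizer,
# and output path are illustrative:
# from pathlib import Path
# from transformers.onnx import export
# onnx_config = DebertaV2OnnxConfig(model.config , task="sequence-classification" )
# export(tokenizer , model , onnx_config , onnx_config.default_onnx_opset , Path("deberta.onnx" ) )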
| 720 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__lowerCamelCase : Optional[int] = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
__lowerCamelCase : str = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
__lowerCamelCase : str = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
] , )
    def _compute( self , predictions , references , p_features=None , q_features=None , p_tokens=None , q_tokens=None , num_buckets="auto" , pca_max_data=-1 , kmeans_explained_var=0.9 , kmeans_num_redo=5 , kmeans_max_iter=5_0_0 , featurize_model_name="gpt2-large" , device_id=-1 , max_text_length=1_0_2_4 , divergence_curve_discretization_size=2_5 , mauve_scaling_factor=5 , verbose=True , seed=2_5 , ):
        out = compute_mauve(
            p_text=predictions , q_text=references , p_features=p_features , q_features=q_features , p_tokens=p_tokens , q_tokens=q_tokens , num_buckets=num_buckets , pca_max_data=pca_max_data , kmeans_explained_var=kmeans_explained_var , kmeans_num_redo=kmeans_num_redo , kmeans_max_iter=kmeans_max_iter , featurize_model_name=featurize_model_name , device_id=device_id , max_text_length=max_text_length , divergence_curve_discretization_size=divergence_curve_discretization_size , mauve_scaling_factor=mauve_scaling_factor , verbose=verbose , seed=seed , )
        return out
| 25 | 0 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    """simple docstring"""
def __init__( self : List[Any] , __A : Tuple , __A : Optional[int]=1_3 , __A : Tuple=1_0 , __A : List[Any]=3 , __A : Tuple=2 , __A : List[str]=2 , __A : Tuple=True , __A : Optional[Any]=True , __A : Optional[Any]=3_2 , __A : Optional[Any]=5 , __A : Any=4 , __A : Any=3_7 , __A : Optional[int]="gelu" , __A : List[str]=0.1 , __A : int=0.1 , __A : Dict=1_0 , __A : str=0.0_2 , __A : str="divided_space_time" , __A : List[Any]=None , ):
snake_case__ : List[str] = parent
snake_case__ : str = batch_size
snake_case__ : Optional[Any] = image_size
snake_case__ : Dict = num_channels
snake_case__ : List[str] = patch_size
snake_case__ : int = num_frames
snake_case__ : List[str] = is_training
snake_case__ : Union[str, Any] = use_labels
snake_case__ : int = hidden_size
snake_case__ : List[Any] = num_hidden_layers
snake_case__ : str = num_attention_heads
snake_case__ : int = intermediate_size
snake_case__ : Tuple = hidden_act
snake_case__ : List[str] = hidden_dropout_prob
snake_case__ : int = attention_probs_dropout_prob
snake_case__ : List[str] = attention_type
snake_case__ : int = initializer_range
snake_case__ : List[str] = scope
snake_case__ : Any = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
snake_case__ : List[Any] = (image_size // patch_size) ** 2
snake_case__ : str = (num_frames) * self.num_patches_per_frame + 1
def _lowercase ( self : Any ):
snake_case__ : Optional[int] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
snake_case__ : Any = None
if self.use_labels:
snake_case__ : str = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _lowercase ( self : Any ):
snake_case__ : Tuple = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
snake_case__ : List[Any] = self.num_labels
return config
def _lowercase ( self : Dict , __A : Tuple , __A : str , __A : Union[str, Any] ):
snake_case__ : Any = TimesformerModel(config=__A )
model.to(__A )
model.eval()
snake_case__ : int = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Dict , __A : List[Any] , __A : Tuple , __A : str ):
snake_case__ : Any = TimesformerForVideoClassification(__A )
model.to(__A )
model.eval()
snake_case__ : Dict = model(__A )
# verify the logits shape
snake_case__ : Union[str, Any] = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __A )
def _lowercase ( self : Tuple ):
snake_case__ : Dict = self.prepare_config_and_inputs()
snake_case__ : List[str] = config_and_inputs
snake_case__ : Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
a_ = (
{"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
a_ = False
a_ = False
a_ = False
a_ = False
    def setUp( self ):
        self.model_tester = TimesformerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=TimesformerConfig , has_text_modality=False , hidden_size=3_7 )
def _lowercase ( self : Dict , __A : Tuple , __A : Dict , __A : Union[str, Any]=False ):
snake_case__ : Optional[Any] = copy.deepcopy(__A )
if return_labels:
if model_class in get_values(__A ):
snake_case__ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
return inputs_dict
def _lowercase ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="TimeSformer does not use inputs_embeds" )
def _lowercase ( self : Union[str, Any] ):
pass
def _lowercase ( self : Dict ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(__A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A , nn.Linear ) )
def _lowercase ( self : Optional[Any] ):
snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(__A )
snake_case__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Union[str, Any] = [*signature.parameters.keys()]
snake_case__ : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __A )
def _lowercase ( self : str ):
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def _lowercase ( self : Optional[Any] ):
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__A )
@slow
def _lowercase ( self : Union[str, Any] ):
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : List[str] = TimesformerModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def _lowercase ( self : Tuple ):
if not self.has_attentions:
pass
else:
snake_case__ : int = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Union[str, Any] = True
for model_class in self.all_model_classes:
snake_case__ : List[Any] = self.model_tester.seq_length
snake_case__ : Optional[int] = self.model_tester.num_frames
snake_case__ : int = True
snake_case__ : Optional[Any] = False
snake_case__ : Optional[int] = True
snake_case__ : Optional[int] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
snake_case__ : str = model(**self._prepare_for_class(__A , __A ) )
snake_case__ : Dict = outputs.attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case__ : List[Any] = True
snake_case__ : List[str] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
snake_case__ : int = model(**self._prepare_for_class(__A , __A ) )
snake_case__ : Dict = outputs.attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
snake_case__ : Dict = len(__A )
# Check attention is always last and order is fine
snake_case__ : Union[str, Any] = True
snake_case__ : List[str] = True
snake_case__ : Optional[Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
snake_case__ : str = model(**self._prepare_for_class(__A , __A ) )
self.assertEqual(out_len + 1 , len(__A ) )
snake_case__ : str = outputs.attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def _lowercase ( self : Optional[int] ):
def check_hidden_states_output(__A : str , __A : Dict , __A : Optional[Any] ):
snake_case__ : Optional[int] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
snake_case__ : str = model(**self._prepare_for_class(__A , __A ) )
snake_case__ : Dict = outputs.hidden_states
snake_case__ : Union[str, Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__A ) , __A )
snake_case__ : Tuple = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
snake_case__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : int = True
check_hidden_states_output(__A , __A , __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : str = True
check_hidden_states_output(__A , __A , __A )
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
    video = np.load(file )
    return list(video )
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _lowercase ( self : Optional[int] ):
snake_case__ : Tuple = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to(
__A )
snake_case__ : Tuple = self.default_image_processor
snake_case__ : str = prepare_video()
snake_case__ : str = image_processor(video[:8] , return_tensors="pt" ).to(__A )
# forward pass
with torch.no_grad():
snake_case__ : Optional[Any] = model(**__A )
# verify the logits
snake_case__ : Optional[Any] = torch.Size((1, 4_0_0) )
self.assertEqual(outputs.logits.shape , __A )
snake_case__ : Tuple = torch.tensor([-0.3_0_1_6, -0.7_7_1_3, -0.4_2_0_5] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __A , atol=1e-4 ) )
| 721 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__lowerCamelCase : Union[str, Any] = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__lowerCamelCase : List[Any] = concatenate_datasets
__lowerCamelCase : List[str] = DownloadConfig
__lowerCamelCase : Union[str, Any] = DownloadManager
__lowerCamelCase : str = DownloadMode
__lowerCamelCase : Union[str, Any] = DownloadConfig
__lowerCamelCase : List[str] = DownloadMode
__lowerCamelCase : Dict = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 25 | 0 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = [randint(-1_000 , 1_000 ) for i in range(10 )]
SCREAMING_SNAKE_CASE__ : Tuple = randint(-5_000 , 5_000 )
return (arr, r)
__lowercase :Any = make_dataset()
def UpperCAmelCase ( _lowerCamelCase : list[int] , _lowerCamelCase : int ):
'''simple docstring'''
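    # Brute force: check every ordered triple of elements, O(n^3) time.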
for triplet in permutations(_lowerCamelCase , 3 ):
        if sum(triplet ) == target:
            return tuple(sorted(triplet ) )
return (0, 0, 0)
def UpperCAmelCase ( _lowerCamelCase : list[int] , _lowerCamelCase : int ):
'''simple docstring'''
arr.sort()
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(_lowerCamelCase )
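    # Fix arr[i], then close in on the remaining pair with two pointers
    # moving inward from both ends of the sorted suffix.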
for i in range(n - 1 ):
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
SCREAMING_SNAKE_CASE__ : str = "\ntriplet_sum1(*dataset)\n"
SCREAMING_SNAKE_CASE__ : Optional[Any] = "\ntriplet_sum2(*dataset)\n"
SCREAMING_SNAKE_CASE__ : Any = repeat(setup=_lowerCamelCase , stmt=_lowerCamelCase , repeat=5 , number=10_000 )
SCREAMING_SNAKE_CASE__ : Dict = repeat(setup=_lowerCamelCase , stmt=_lowerCamelCase , repeat=5 , number=10_000 )
return (min(_lowerCamelCase ), min(_lowerCamelCase ))
if __name__ == "__main__":
from doctest import testmod
testmod()
__lowercase :str = solution_times()
print(f"The time for naive implementation is {times[0]}.")
print(f"The time for optimized implementation is {times[1]}.")
| 26 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
__lowercase :List[str] = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
__lowercase :str = get_tests_dir("fixtures/vocab.json")
__lowercase :Optional[int] = get_tests_dir("fixtures")
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def A_ ( self : Optional[Any] ) ->int:
SCREAMING_SNAKE_CASE__ : Dict = 0
def A_ ( self : Any ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaConfig()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
# save in new folder
model_config.save_pretrained(a )
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : str = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : int ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(a , os.path.join(a , a ) )
copyfile(a , os.path.join(a , "vocab.json" ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[Any] ) ->Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Tuple = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Any = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in tokenizer
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : Optional[int] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[str] ) ->Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in feature extractor
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : List[Any] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->str:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : List[Any] = WavaVecaConfig(processor_class="Wav2Vec2Processor" )
model_config.save_pretrained(a )
# copy relevant files
copyfile(a , os.path.join(a , "vocab.json" ) )
            # create empty sample processor
with open(os.path.join(a , a ) , "w" ) as f:
f.write("{}" )
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Optional[Any] ) ->Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
SCREAMING_SNAKE_CASE__ : Dict = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ : int = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a , use_fast=a )
SCREAMING_SNAKE_CASE__ : List[Any] = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def A_ ( self : Tuple ) ->List[Any]:
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a ):
AutoProcessor.register(a , a )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE__ : List[str] = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : int = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : List[Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Union[str, Any] ) ->int:
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = False
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = False
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "AutoFeatureExtractor"
snake_case_ = "AutoTokenizer"
snake_case_ = False
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# If remote code is not set, the default is to use local classes.
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Optional[Any] ) ->Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" )
def A_ ( self : Dict ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" )
@is_staging_test
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def A_ ( cls : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : int = TOKEN
HfFolder.save_token(a )
@classmethod
def A_ ( cls : List[str] ) ->Optional[int]:
try:
delete_repo(token=cls._token , repo_id="test-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-processor" )
except HTTPError:
pass
def A_ ( self : Dict ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , "test-processor" ) , push_to_hub=a , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , "test-processor-org" ) , push_to_hub=a , use_auth_token=self._token , organization="valid_org" , )
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : Any ) ->int:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE__ : Any = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : str = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token )
SCREAMING_SNAKE_CASE__ : str = Repository(a , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(a )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(a , "tokenizer_config.json" ) ) as f:
SCREAMING_SNAKE_CASE__ : str = json.load(a )
self.assertDictEqual(
tokenizer_config["auto_map"] , {
"AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(a , "custom_feature_extraction.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , "custom_tokenization.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , "custom_processing.py" ) ) )
repo.push_to_hub()
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=a )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
| 26 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase :List[str] = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :str = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Union[str, Any] = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Any = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Any = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
__lowercase :str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["image_processor", "tokenizer"]
snake_case_ = "CLIPImageProcessor"
snake_case_ = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : Any , a : List[Any]=None , a : Any=None , **a : int ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , a , )
SCREAMING_SNAKE_CASE__ : List[Any] = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE__ : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(a , a )
def __call__( self : Tuple , a : Tuple=None , a : Union[str, Any]=None , a : List[str]=None , **a : Optional[Any] ) ->Optional[Any]:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
SCREAMING_SNAKE_CASE__ : str = self.tokenizer(a , return_tensors=a , **a )
if images is not None:
SCREAMING_SNAKE_CASE__ : int = self.image_processor(a , return_tensors=a , **a )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE__ : Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a ) , tensor_type=a )
def A_ ( self : Optional[int] , *a : Any , **a : List[str] ) ->Any:
return self.tokenizer.batch_decode(*a , **a )
def A_ ( self : Any , *a : Optional[int] , **a : Dict ) ->Any:
return self.tokenizer.decode(*a , **a )
@property
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A_ ( self : Optional[int] ) ->List[Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , a , )
return self.image_processor_class
@property
def A_ ( self : Dict ) ->str:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , a , )
return self.image_processor
| 26 | 1 |
import math
import sys
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ""
try:
with open(_lowerCamelCase , "rb" ) as binary_file:
SCREAMING_SNAKE_CASE__ : Tuple = binary_file.read()
for dat in data:
SCREAMING_SNAKE_CASE__ : List[str] = f"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = {"0": "0", "1": "1"}
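    # Seed the lexicon with the two single-bit codes; it grows as longer
    # prefixes are matched during LZW decompression.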
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = "", ""
SCREAMING_SNAKE_CASE__ : str = len(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
SCREAMING_SNAKE_CASE__ : Union[str, Any] = lexicon[curr_string]
result += last_match_id
SCREAMING_SNAKE_CASE__ : int = last_match_id + "0"
if math.loga(_lowerCamelCase ).is_integer():
SCREAMING_SNAKE_CASE__ : Any = {}
for curr_key in list(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = lexicon.pop(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = new_lex
SCREAMING_SNAKE_CASE__ : List[str] = last_match_id + "1"
index += 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = ""
return result
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = 8
try:
with open(_lowerCamelCase , "wb" ) as opened_file:
SCREAMING_SNAKE_CASE__ : str = [
to_write[i : i + byte_length]
for i in range(0 , len(_lowerCamelCase ) , _lowerCamelCase )
]
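            # Pad the final chunk out to a full byte: a single "1" marker
            # followed by zeros.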
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(_lowerCamelCase , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
SCREAMING_SNAKE_CASE__ : int = data_bits[counter:]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = data_bits[counter + 1 :]
return data_bits
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = read_file_binary(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = remove_prefix(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = decompress_data(_lowerCamelCase )
write_file_binary(_lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 26 |
import sys
from collections import defaultdict
class _a :
"""simple docstring"""
def __init__( self : Any ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = []
def A_ ( self : int , a : List[str] ) ->Dict:
return self.node_position[vertex]
def A_ ( self : Optional[Any] , a : Any , a : List[str] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : str = pos
def A_ ( self : List[Any] , a : List[str] , a : Dict , a : Dict , a : List[Any] ) ->Optional[int]:
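        # Sift the value at `start` down the heap, keeping the vertex
        # position table in sync with every swap.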
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
SCREAMING_SNAKE_CASE__ : Dict = 2 * start + 1
else:
SCREAMING_SNAKE_CASE__ : Tuple = 2 * start + 2
if heap[smallest_child] < heap[start]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = heap[smallest_child], positions[smallest_child]
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = (
heap[start],
positions[start],
)
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = temp, tempa
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , a )
self.top_to_bottom(a , a , a , a )
def A_ ( self : Union[str, Any] , a : Tuple , a : Tuple , a : Union[str, Any] , a : List[Any] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = position[index]
while index != 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
SCREAMING_SNAKE_CASE__ : List[Any] = heap[parent]
SCREAMING_SNAKE_CASE__ : str = position[parent]
self.set_position(position[parent] , a )
else:
SCREAMING_SNAKE_CASE__ : int = val
SCREAMING_SNAKE_CASE__ : Optional[Any] = temp
self.set_position(a , a )
break
SCREAMING_SNAKE_CASE__ : Optional[int] = parent
else:
SCREAMING_SNAKE_CASE__ : int = val
SCREAMING_SNAKE_CASE__ : List[str] = temp
self.set_position(a , 0 )
def A_ ( self : Union[str, Any] , a : int , a : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[str] = len(a ) // 2 - 1
for i in range(a , -1 , -1 ):
self.top_to_bottom(a , a , len(a ) , a )
def A_ ( self : Dict , a : List[Any] , a : Dict ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Any = positions[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = sys.maxsize
self.top_to_bottom(a , 0 , len(a ) , a )
return temp
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = Heap()
SCREAMING_SNAKE_CASE__ : Any = [0] * len(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = [-1] * len(_lowerCamelCase ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] # Heap of Distance of vertices from their neighboring vertex
SCREAMING_SNAKE_CASE__ : str = []
for vertex in range(len(_lowerCamelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(_lowerCamelCase )
heap.node_position.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = []
SCREAMING_SNAKE_CASE__ : int = 1
SCREAMING_SNAKE_CASE__ : int = sys.maxsize
for neighbor, distance in adjacency_list[0]:
SCREAMING_SNAKE_CASE__ : int = 0
SCREAMING_SNAKE_CASE__ : List[str] = distance
heap.heapify(_lowerCamelCase , _lowerCamelCase )
for _ in range(1 , len(_lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = heap.delete_minimum(_lowerCamelCase , _lowerCamelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(_lowerCamelCase )]
):
SCREAMING_SNAKE_CASE__ : Any = distance
heap.bottom_to_top(
_lowerCamelCase , heap.get_position(_lowerCamelCase ) , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__lowercase :Union[str, Any] = int(input("Enter number of edges: ").strip())
__lowercase :Dict = defaultdict(list)
for _ in range(edges_number):
__lowercase :Any = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 26 | 1 |
import os
import sys
__lowercase :int = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
__lowercase :List[Any] = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
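# Thin wrappers re-exposing the Auto* loaders at module level, in the style of
# a hubconf entry-point file.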
@add_start_docstrings(AutoConfig.__doc__ )
def UpperCAmelCase ( *_lowerCamelCase : Any , **_lowerCamelCase : List[Any] ):
'''simple docstring'''
return AutoConfig.from_pretrained(*_lowerCamelCase , **_lowerCamelCase )
@add_start_docstrings(AutoTokenizer.__doc__ )
def UpperCAmelCase ( *_lowerCamelCase : str , **_lowerCamelCase : List[Any] ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(*_lowerCamelCase , **_lowerCamelCase )
@add_start_docstrings(AutoModel.__doc__ )
def UpperCAmelCase ( *_lowerCamelCase : Tuple , **_lowerCamelCase : Dict ):
'''simple docstring'''
return AutoModel.from_pretrained(*_lowerCamelCase , **_lowerCamelCase )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def UpperCAmelCase ( *_lowerCamelCase : List[Any] , **_lowerCamelCase : Tuple ):
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*_lowerCamelCase , **_lowerCamelCase )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def UpperCAmelCase ( *_lowerCamelCase : List[Any] , **_lowerCamelCase : List[Any] ):
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*_lowerCamelCase , **_lowerCamelCase )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def UpperCAmelCase ( *_lowerCamelCase : Any , **_lowerCamelCase : int ):
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*_lowerCamelCase , **_lowerCamelCase )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def UpperCAmelCase ( *_lowerCamelCase : Dict , **_lowerCamelCase : Dict ):
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*_lowerCamelCase , **_lowerCamelCase )
| 26 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__lowercase :List[Any] = logging.get_logger(__name__)
__lowercase :Optional[int] = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "longformer"
def __init__( self : List[str] , a : Union[List[int], int] = 5_12 , a : int = 2 , a : int = 1 , a : int = 0 , a : int = 2 , a : int = 3_05_22 , a : int = 7_68 , a : int = 12 , a : int = 12 , a : int = 30_72 , a : str = "gelu" , a : float = 0.1 , a : float = 0.1 , a : int = 5_12 , a : int = 2 , a : float = 0.02 , a : float = 1E-12 , a : bool = False , **a : Dict , ) ->Tuple:
super().__init__(pad_token_id=a , **a )
SCREAMING_SNAKE_CASE__ : int = attention_window
SCREAMING_SNAKE_CASE__ : Any = sep_token_id
SCREAMING_SNAKE_CASE__ : str = bos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = eos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : str = type_vocab_size
SCREAMING_SNAKE_CASE__ : Any = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Any = onnx_export
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : int , a : "PretrainedConfig" , a : str = "default" , a : "List[PatchingSpec]" = None ) ->str:
super().__init__(a , a , a )
SCREAMING_SNAKE_CASE__ : Any = True
@property
def A_ ( self : int ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE__ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
def A_ ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = super().outputs
if self.task == "default":
SCREAMING_SNAKE_CASE__ : List[str] = {0: "batch"}
return outputs
@property
def A_ ( self : str ) ->float:
return 1E-4
@property
def A_ ( self : Any ) ->int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def A_ ( self : str , a : "PreTrainedTokenizerBase" , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE__ : Tuple = super().generate_dummy_inputs(
preprocessor=a , batch_size=a , seq_length=a , is_pair=a , framework=a )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
SCREAMING_SNAKE_CASE__ : Any = torch.zeros_like(inputs["input_ids"] )
# make every second token global
SCREAMING_SNAKE_CASE__ : str = 1
return inputs
| 26 | 1 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__lowercase :List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Tuple , a : WhisperForConditionalGeneration , a : WhisperProcessor , a : AutoencoderKL , a : CLIPTextModel , a : CLIPTokenizer , a : UNetaDConditionModel , a : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , a : StableDiffusionSafetyChecker , a : CLIPImageProcessor , ) ->Union[str, Any]:
super().__init__()
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
speech_model=a , speech_processor=a , vae=a , text_encoder=a , tokenizer=a , unet=a , scheduler=a , feature_extractor=a , )
def A_ ( self : List[Any] , a : Optional[Union[str, int]] = "auto" ) ->int:
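        # Attention slicing computes attention in smaller chunks to reduce
        # peak memory; "auto" halves the attention head dimension.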
if slice_size == "auto":
SCREAMING_SNAKE_CASE__ : List[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(a )
def A_ ( self : List[Any] ) ->str:
self.enable_attention_slicing(a )
@torch.no_grad()
def __call__( self : str , a : str , a : Tuple=1_60_00 , a : int = 5_12 , a : int = 5_12 , a : int = 50 , a : float = 7.5 , a : Optional[Union[str, List[str]]] = None , a : Optional[int] = 1 , a : float = 0.0 , a : Optional[torch.Generator] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , a : int = 1 , **a : List[Any] , ) ->Any:
SCREAMING_SNAKE_CASE__ : List[str] = self.speech_processor.feature_extractor(
a , return_tensors="pt" , sampling_rate=a ).input_features.to(self.device )
SCREAMING_SNAKE_CASE__ : str = self.speech_model.generate(a , max_length=48_00_00 )
SCREAMING_SNAKE_CASE__ : int = self.speech_processor.tokenizer.batch_decode(a , skip_special_tokens=a , normalize=a )[
0
]
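        # The transcription now serves as the text prompt for the standard
        # Stable Diffusion sampling loop below.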
if isinstance(a , a ):
SCREAMING_SNAKE_CASE__ : str = 1
elif isinstance(a , a ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = len(a )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(a )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(a , a ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(a )}.""" )
# get prompt text embeddings
SCREAMING_SNAKE_CASE__ : List[Any] = self.tokenizer(
a , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
SCREAMING_SNAKE_CASE__ : List[Any] = text_input_ids[:, : self.tokenizer.model_max_length]
SCREAMING_SNAKE_CASE__ : List[str] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = text_embeddings.shape
SCREAMING_SNAKE_CASE__ : Dict = text_embeddings.repeat(1 , a , 1 )
SCREAMING_SNAKE_CASE__ : List[str] = text_embeddings.view(bs_embed * num_images_per_prompt , a , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
SCREAMING_SNAKE_CASE__ : Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE__ : List[str]
if negative_prompt is None:
SCREAMING_SNAKE_CASE__ : Dict = [""] * batch_size
elif type(a ) is not type(a ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(a )} !="""
f""" {type(a )}.""" )
elif isinstance(a , a ):
SCREAMING_SNAKE_CASE__ : Any = [negative_prompt]
elif batch_size != len(a ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(a )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
" the batch size of `prompt`." )
else:
SCREAMING_SNAKE_CASE__ : List[str] = negative_prompt
SCREAMING_SNAKE_CASE__ : Optional[int] = text_input_ids.shape[-1]
SCREAMING_SNAKE_CASE__ : List[Any] = self.tokenizer(
a , padding="max_length" , max_length=a , truncation=a , return_tensors="pt" , )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
SCREAMING_SNAKE_CASE__ : str = uncond_embeddings.shape[1]
SCREAMING_SNAKE_CASE__ : Any = uncond_embeddings.repeat(1 , a , 1 )
SCREAMING_SNAKE_CASE__ : Any = uncond_embeddings.view(batch_size * num_images_per_prompt , a , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
SCREAMING_SNAKE_CASE__ : Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
SCREAMING_SNAKE_CASE__ : Optional[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
SCREAMING_SNAKE_CASE__ : List[str] = torch.randn(a , generator=a , device="cpu" , dtype=a ).to(
self.device )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = torch.randn(a , generator=a , device=self.device , dtype=a )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(a )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
SCREAMING_SNAKE_CASE__ : Optional[int] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
SCREAMING_SNAKE_CASE__ : int = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {}
if accepts_eta:
SCREAMING_SNAKE_CASE__ : str = eta
for i, t in enumerate(self.progress_bar(a ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE__ : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.scheduler.scale_model_input(a , a )
# predict the noise residual
SCREAMING_SNAKE_CASE__ : List[Any] = self.unet(a , a , encoder_hidden_states=a ).sample
# perform guidance
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE__ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE__ : List[str] = self.scheduler.step(a , a , a , **a ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(a , a , a )
SCREAMING_SNAKE_CASE__ : Any = 1 / 0.1_8215 * latents
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.vae.decode(a ).sample
SCREAMING_SNAKE_CASE__ : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
SCREAMING_SNAKE_CASE__ : str = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE__ : int = self.numpy_to_pil(a )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=a , nsfw_content_detected=a )
| 26 |
def UpperCAmelCase ( _lowerCamelCase : int = 4_000_000 ):
'''simple docstring'''
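    # Sum the even-valued Fibonacci terms not exceeding n (Project Euler problem 2).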
SCREAMING_SNAKE_CASE__ : Any = [0, 1]
SCREAMING_SNAKE_CASE__ : List[Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
for j in range(len(_lowerCamelCase ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"{solution() = }")
| 26 | 1 |
from __future__ import annotations
from typing import Any
class _a :
"""simple docstring"""
def __init__( self : Tuple , a : int , a : int , a : float = 0 ) ->None:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = row, column
SCREAMING_SNAKE_CASE__ : List[Any] = [[default_value for c in range(a )] for r in range(a )]
def __str__( self : List[Any] ) ->str:
SCREAMING_SNAKE_CASE__ : Optional[int] = f"""Matrix consist of {self.row} rows and {self.column} columns\n"""
        # Find the widest element so every entry prints at a fixed width
SCREAMING_SNAKE_CASE__ : Tuple = 0
for row_vector in self.array:
for obj in row_vector:
SCREAMING_SNAKE_CASE__ : int = max(a , len(str(a ) ) )
SCREAMING_SNAKE_CASE__ : Dict = f"""%{max_element_length}s"""
# Make string and return
def single_line(a : list[float] ) -> str:
nonlocal string_format_identifier
SCREAMING_SNAKE_CASE__ : Dict = "["
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(a ) for row_vector in self.array )
return s
def __repr__( self : int ) ->str:
return str(self )
def A_ ( self : str , a : tuple[int, int] ) ->bool:
if not (isinstance(a , (list, tuple) ) and len(a ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Tuple , a : tuple[int, int] ) ->Any:
        assert self.validate_indices(a )
return self.array[loc[0]][loc[1]]
def __setitem__( self : Optional[int] , a : tuple[int, int] , a : float ) ->None:
        assert self.validate_indices(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = value
def __add__( self : List[str] , a : Matrix ) ->Matrix:
assert isinstance(a , a )
assert self.row == another.row and self.column == another.column
# Add
SCREAMING_SNAKE_CASE__ : str = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self[r, c] + another[r, c]
return result
def __neg__( self : Tuple ) ->Matrix:
SCREAMING_SNAKE_CASE__ : List[Any] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
SCREAMING_SNAKE_CASE__ : Any = -self[r, c]
return result
def __sub__( self : Dict , a : Matrix ) ->Matrix:
return self + (-another)
def __mul__( self : Dict , a : int | float | Matrix ) ->Matrix:
if isinstance(a , (int, float) ): # Scalar multiplication
SCREAMING_SNAKE_CASE__ : Optional[int] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
SCREAMING_SNAKE_CASE__ : Tuple = self[r, c] * another
return result
elif isinstance(a , a ): # Matrix multiplication
assert self.column == another.row
SCREAMING_SNAKE_CASE__ : Optional[Any] = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = f"""Unsupported type given for another ({type(a )})"""
raise TypeError(a )
def A_ ( self : Optional[int] ) ->Matrix:
SCREAMING_SNAKE_CASE__ : Optional[int] = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
SCREAMING_SNAKE_CASE__ : Any = self[r, c]
return result
def A_ ( self : int , a : Matrix , a : Matrix ) ->Any:
assert isinstance(a , a ) and isinstance(a , a )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
        # Sherman-Morrison: with ``self`` holding A^(-1), the result is
        # A^(-1) - (A^(-1) u)(v^T A^(-1)) / (1 + v^T A^(-1) u)
SCREAMING_SNAKE_CASE__ : Optional[int] = v.transpose()
SCREAMING_SNAKE_CASE__ : Dict = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
            return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = Matrix(3 , 3 , 0 )
for i in range(3 ):
SCREAMING_SNAKE_CASE__ : int = 1
print(f"""a^(-1) is {ainv}""" )
# u, v
SCREAMING_SNAKE_CASE__ : Optional[Any] = Matrix(3 , 1 , 0 )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = 1, 2, -3
SCREAMING_SNAKE_CASE__ : List[Any] = Matrix(3 , 1 , 0 )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = 4, -2, 5
print(f"""u is {u}""" )
print(f"""v is {v}""" )
print(f"""uv^T is {u * v.transpose()}""" )
# Sherman Morrison
print(f"""(a + uv^T)^(-1) is {ainv.sherman_morrison(_lowerCamelCase , _lowerCamelCase )}""" )
def UpperCAmelCase ( ):
'''simple docstring'''
import doctest
doctest.testmod()
testa()
| 26 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , a : Any , a : bool = True , a : Dict[str, int] = None , a : int = 32 , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : bool = True , a : Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073] , a : Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711] , a : bool = True , a : Any=7 , a : str=30 , a : Dict=4_00 , a : Optional[int]=3 , ) ->int:
SCREAMING_SNAKE_CASE__ : int = parent
SCREAMING_SNAKE_CASE__ : Dict = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_88}
SCREAMING_SNAKE_CASE__ : List[Any] = size_divisor
SCREAMING_SNAKE_CASE__ : List[Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Tuple = rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean
SCREAMING_SNAKE_CASE__ : Dict = image_std
SCREAMING_SNAKE_CASE__ : List[str] = do_pad
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : int = num_channels
SCREAMING_SNAKE_CASE__ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_resolution
def A_ ( self : List[str] ) ->Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def A_ ( self : int , a : Optional[int] , a : Union[str, Any]=False ) ->Optional[Any]:
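        # Re-derive the size the processor should output: scale the shorter
        # side to `size`, cap the longer side, then round down to a multiple
        # of `size_divisor`.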
if not batched:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE__ : Dict = image_inputs[0]
if isinstance(a , Image.Image ):
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = image.size
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = image.shape[1], image.shape[2]
SCREAMING_SNAKE_CASE__ : Any = size / min(a , a )
if h < w:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = size, scale * w
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = scale * h, size
SCREAMING_SNAKE_CASE__ : List[Any] = int((13_33 / 8_00) * size )
if max(a , a ) > max_size:
SCREAMING_SNAKE_CASE__ : List[Any] = max_size / max(a , a )
SCREAMING_SNAKE_CASE__ : int = newh * scale
SCREAMING_SNAKE_CASE__ : Optional[int] = neww * scale
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = int(newh + 0.5 ), int(neww + 0.5 )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
SCREAMING_SNAKE_CASE__ : List[Any] = []
for image in image_inputs:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[0] )[0]
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = BridgeTowerImageProcessor if is_vision_available() else None
def A_ ( self : List[Any] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Any = BridgeTowerImageProcessingTester(self )
@property
def A_ ( self : Optional[int] ) ->Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self : Tuple ) ->str:
SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "image_mean" ) )
self.assertTrue(hasattr(a , "image_std" ) )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "do_resize" ) )
self.assertTrue(hasattr(a , "size" ) )
self.assertTrue(hasattr(a , "size_divisor" ) )
def A_ ( self : List[Any] ) ->List[Any]:
pass
def A_ ( self : Tuple ) ->Optional[Any]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : int = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : Optional[int] ) ->Any:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : str ) ->Optional[int]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Any = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 26 | 1 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__lowercase :Any = logging.get_logger(__name__)
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : str , *a : Optional[Any] , **a : int ) ->None:
warnings.warn(
"The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use OwlViTImageProcessor instead." , a , )
super().__init__(*a , **a )
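# A minimal sketch of the same deprecation-shim pattern: subclass the
# replacement class and warn on construction. The class names below are
# illustrative, not part of any real library API.
import warnings

class NewProcessor:
    def __init__(self, scale=1.0):
        self.scale = scale

class DeprecatedProcessor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "DeprecatedProcessor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)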
| 26 |
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
SCREAMING_SNAKE_CASE__ : List[str] = [
2_047,
1_373_653,
25_326_001,
3_215_031_751,
2_152_302_898_747,
3_474_749_660_383,
341_550_071_728_321,
1,
3_825_123_056_546_413_051,
1,
1,
318_665_857_834_031_151_167_461,
3_317_044_064_679_887_385_961_981,
]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(_lowerCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
SCREAMING_SNAKE_CASE__ : Dict = primes[:idx]
break
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
SCREAMING_SNAKE_CASE__ : str = False
for r in range(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = pow(_lowerCamelCase , d * 2**r , _lowerCamelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
SCREAMING_SNAKE_CASE__ : str = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
        # and n MUST be composite
return False
return True
def UpperCAmelCase ( ):
'''simple docstring'''
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838_201 )
assert miller_rabin(838_207 )
# 1_373_653
assert not miller_rabin(17_316_001 )
assert miller_rabin(17_316_017 )
# 25_326_001
assert not miller_rabin(3_078_386_641 )
assert miller_rabin(3_078_386_653 )
# 3_215_031_751
assert not miller_rabin(1_713_045_574_801 )
assert miller_rabin(1_713_045_574_819 )
# 2_152_302_898_747
assert not miller_rabin(2_779_799_728_307 )
assert miller_rabin(2_779_799_728_327 )
# 3_474_749_660_383
assert not miller_rabin(113_850_023_909_441 )
assert miller_rabin(113_850_023_909_527 )
# 341_550_071_728_321
assert not miller_rabin(1_275_041_018_848_804_351 )
assert miller_rabin(1_275_041_018_848_804_391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79_666_464_458_507_787_791_867 )
assert miller_rabin(79_666_464_458_507_787_791_951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552_840_677_446_647_897_660_333 )
assert miller_rabin(552_840_677_446_647_897_660_359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
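# An illustrative sketch of the n - 1 = d * 2**s decomposition that the test
# above relies on; the helper name is ours, not part of this module.
def decompose(n):
    d, s = n - 1, 0
    while d % 2 == 0:
        d //= 2
        s += 1
    return d, s

# 561 - 1 = 560 = 35 * 2**4, so each witness a is checked via a**35, a**70, ...
assert decompose(561) == (35, 4)
assert decompose(563) == (281, 1)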
| 26 | 1 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__lowercase :str = logging.get_logger(__name__)
def UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Any ):
'''simple docstring'''
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
def UpperCAmelCase ( _lowerCamelCase : np.ndarray , _lowerCamelCase : Optional[str] , _lowerCamelCase : Optional[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = to_pil_image(_lowerCamelCase )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = pil_image.size
SCREAMING_SNAKE_CASE__ : Tuple = pytesseract.image_to_data(_lowerCamelCase , lang=_lowerCamelCase , output_type="dict" , config=_lowerCamelCase )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = data["text"], data["left"], data["top"], data["width"], data["height"]
# filter empty words and corresponding coordinates
SCREAMING_SNAKE_CASE__ : List[str] = [idx for idx, word in enumerate(_lowerCamelCase ) if not word.strip()]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [word for idx, word in enumerate(_lowerCamelCase ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : Optional[Any] = [coord for idx, coord in enumerate(_lowerCamelCase ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : int = [coord for idx, coord in enumerate(_lowerCamelCase ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : Dict = [coord for idx, coord in enumerate(_lowerCamelCase ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : int = [coord for idx, coord in enumerate(_lowerCamelCase ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
SCREAMING_SNAKE_CASE__ : Tuple = []
for x, y, w, h in zip(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : List[str] = [x, y, x + w, y + h]
actual_boxes.append(_lowerCamelCase )
# finally, normalize the bounding boxes
SCREAMING_SNAKE_CASE__ : str = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) )
assert len(_lowerCamelCase ) == len(_lowerCamelCase ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["pixel_values"]
def __init__( self : int , a : bool = True , a : Dict[str, int] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = True , a : float = 1 / 2_55 , a : bool = True , a : Union[float, Iterable[float]] = None , a : Union[float, Iterable[float]] = None , a : bool = True , a : Optional[str] = None , a : Optional[str] = "" , **a : Optional[int] , ) ->None:
super().__init__(**a )
SCREAMING_SNAKE_CASE__ : Dict = size if size is not None else {"height": 2_24, "width": 2_24}
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : Optional[int] = do_resize
SCREAMING_SNAKE_CASE__ : Optional[Any] = size
SCREAMING_SNAKE_CASE__ : Optional[int] = resample
SCREAMING_SNAKE_CASE__ : Optional[int] = do_rescale
SCREAMING_SNAKE_CASE__ : List[Any] = rescale_value
SCREAMING_SNAKE_CASE__ : Dict = do_normalize
SCREAMING_SNAKE_CASE__ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
SCREAMING_SNAKE_CASE__ : str = apply_ocr
SCREAMING_SNAKE_CASE__ : int = ocr_lang
SCREAMING_SNAKE_CASE__ : Optional[int] = tesseract_config
def A_ ( self : Any , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BILINEAR , a : Optional[Union[str, ChannelDimension]] = None , **a : Optional[int] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : List[str] = get_size_dict(a )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : Any = (size["height"], size["width"])
return resize(a , size=a , resample=a , data_format=a , **a )
def A_ ( self : Tuple , a : np.ndarray , a : Union[int, float] , a : Optional[Union[str, ChannelDimension]] = None , **a : Any , ) ->np.ndarray:
return rescale(a , scale=a , data_format=a , **a )
def A_ ( self : Optional[int] , a : np.ndarray , a : Union[float, Iterable[float]] , a : Union[float, Iterable[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : Optional[Any] , ) ->np.ndarray:
return normalize(a , mean=a , std=a , data_format=a , **a )
def A_ ( self : Any , a : ImageInput , a : bool = None , a : Dict[str, int] = None , a : List[str]=None , a : bool = None , a : float = None , a : bool = None , a : Union[float, Iterable[float]] = None , a : Union[float, Iterable[float]] = None , a : bool = None , a : Optional[str] = None , a : Optional[str] = None , a : Optional[Union[str, TensorType]] = None , a : ChannelDimension = ChannelDimension.FIRST , **a : int , ) ->PIL.Image.Image:
SCREAMING_SNAKE_CASE__ : List[str] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : Any = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Tuple = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : Any = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : str = apply_ocr if apply_ocr is not None else self.apply_ocr
SCREAMING_SNAKE_CASE__ : List[str] = ocr_lang if ocr_lang is not None else self.ocr_lang
SCREAMING_SNAKE_CASE__ : List[str] = tesseract_config if tesseract_config is not None else self.tesseract_config
SCREAMING_SNAKE_CASE__ : str = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("If do_normalize is True, image_mean and image_std must be specified." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : int = [to_numpy_array(a ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , "pytesseract" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
SCREAMING_SNAKE_CASE__ : List[Any] = []
for image in images:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = apply_tesseract(a , a , a )
words_batch.append(a )
boxes_batch.append(a )
if do_resize:
SCREAMING_SNAKE_CASE__ : str = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.normalize(image=a , mean=a , std=a ) for image in images]
SCREAMING_SNAKE_CASE__ : Any = [to_channel_dimension_format(a , a ) for image in images]
SCREAMING_SNAKE_CASE__ : List[str] = BatchFeature(data={"pixel_values": images} , tensor_type=a )
if apply_ocr:
SCREAMING_SNAKE_CASE__ : Any = words_batch
SCREAMING_SNAKE_CASE__ : Union[str, Any] = boxes_batch
return data
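# A small worked example of the 0-1000 box normalization defined above: a box
# given as (left, top, right, bottom) on a 200 x 100 page lands on a
# resolution-independent thousandths grid. The values are illustrative.
def _normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]

assert _normalize_box([20, 10, 60, 30], width=200, height=100) == [100, 100, 300, 300]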
| 26 |
import numpy
class _a :
"""simple docstring"""
def __init__( self : Optional[int] , a : numpy.ndarray , a : numpy.ndarray ) ->None:
SCREAMING_SNAKE_CASE__ : Any = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
SCREAMING_SNAKE_CASE__ : int = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
SCREAMING_SNAKE_CASE__ : Dict = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
SCREAMING_SNAKE_CASE__ : List[Any] = numpy.random.rand(3 , 1 )
# Real output values provided.
SCREAMING_SNAKE_CASE__ : str = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
SCREAMING_SNAKE_CASE__ : Tuple = numpy.zeros(output_array.shape )
def A_ ( self : Union[str, Any] ) ->numpy.ndarray:
SCREAMING_SNAKE_CASE__ : List[Any] = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
SCREAMING_SNAKE_CASE__ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
SCREAMING_SNAKE_CASE__ : int = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def A_ ( self : int ) ->None:
SCREAMING_SNAKE_CASE__ : Optional[int] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
SCREAMING_SNAKE_CASE__ : int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def A_ ( self : int , a : numpy.ndarray , a : int , a : bool ) ->None:
for iteration in range(1 , iterations + 1 ):
SCREAMING_SNAKE_CASE__ : Dict = self.feedforward()
self.back_propagation()
if give_loss:
SCREAMING_SNAKE_CASE__ : int = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f"""Iteration {iteration} Loss: {loss}""" )
def A_ ( self : Tuple , a : numpy.ndarray ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[int] = input_arr
SCREAMING_SNAKE_CASE__ : Dict = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
SCREAMING_SNAKE_CASE__ : Any = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def UpperCAmelCase ( _lowerCamelCase : numpy.ndarray ):
'''simple docstring'''
return 1 / (1 + numpy.exp(-value ))
def UpperCAmelCase ( _lowerCamelCase : numpy.ndarray ):
'''simple docstring'''
return (value) * (1 - (value))
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
SCREAMING_SNAKE_CASE__ : Any = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
SCREAMING_SNAKE_CASE__ : List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=_lowerCamelCase , output_array=_lowerCamelCase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_lowerCamelCase , iterations=10 , give_loss=_lowerCamelCase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
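# A quick numeric sanity check of the derivative used in back-propagation
# above: the analytic form s * (1 - s) agrees with a central finite difference
# of the sigmoid. Standalone sketch, separate from the class above.
import numpy as np

def _sigmoid(x):
    return 1 / (1 + np.exp(-x))

_x = np.linspace(-4, 4, 9)
_s = _sigmoid(_x)
_eps = 1e-6
_numeric = (_sigmoid(_x + _eps) - _sigmoid(_x - _eps)) / (2 * _eps)
assert np.allclose(_s * (1 - _s), _numeric, atol=1e-8)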
| 26 | 1 |
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = 0
SCREAMING_SNAKE_CASE__ : Optional[Any] = number
while duplicate > 0:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = divmod(_lowerCamelCase , 10 )
fact_sum += factorial(_lowerCamelCase )
return fact_sum == number
if __name__ == "__main__":
print("Program to check whether a number is a Krisnamurthy Number or not.")
__lowercase :str = int(input("Enter number: ").strip())
print(
f"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."
)
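# A self-contained spot check of the property being tested (the helper name is
# ours): 145 = 1! + 4! + 5! = 1 + 24 + 120 is the classic example.
from math import factorial

def _is_krishnamurthy(n):
    return n == sum(factorial(int(d)) for d in str(n))

assert _is_krishnamurthy(145)
assert _is_krishnamurthy(40585)
assert not _is_krishnamurthy(146)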
| 26 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowercase :Tuple = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__lowercase :str = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__lowercase :List[Any] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def A_ ( self : List[Any] ) ->MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def A_ ( self : str , a : List[List[List[str]]] , a : List[List[str]] , a : int = 1 , a : int = 4 , ) ->Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=a , hypotheses=a , min_len=a , max_len=a )
}
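# A minimal direct use of the underlying NLTK scorer that the metric above
# wraps; whitespace tokenization here is deliberately naive, for illustration.
from nltk.translate import gleu_score

_hypothesis = "the cat sat on the mat".split()
_references = [["the cat is on the mat".split()]]
_score = gleu_score.corpus_gleu(list_of_references=_references, hypotheses=[_hypothesis])
assert 0.0 <= _score <= 1.0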
| 26 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase :Tuple = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :List[str] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :List[str] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :List[Any] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
__lowercase :Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
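# The deferred-import machinery above can be approximated with a PEP 562
# module-level __getattr__; a tiny self-contained analogue (the module and
# attribute names below are illustrative, not transformers API):
import importlib
import types

_lazy = types.ModuleType("lazy_demo")
_lazy.__getattr__ = lambda name: getattr(importlib.import_module("json"), name)

# the real import happens only inside the attribute hook, on first access
assert _lazy.dumps({"a": 1}) == '{"a": 1}'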
| 26 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__lowercase :List[Any] = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any]=None ):
'''simple docstring'''
require_version(deps[pkg] , _lowerCamelCase )
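# A minimal sketch of the same runtime check built from the standard library
# plus `packaging` (already a dependency of this file); the helper name is ours.
from importlib.metadata import version
from packaging.version import parse

def _require_at_least(package, minimum):
    found = version(package)
    if parse(found) < parse(minimum):
        raise ImportError(f"{package}>={minimum} is required, found {found}")

_require_at_least("packaging", "20.0")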
| 26 | 1 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
__lowercase :Union[str, Any] = logging.get_logger(__name__)
class _a :
"""simple docstring"""
snake_case_ = 42
snake_case_ = None
@staticmethod
def A_ ( ) ->int:
raise NotImplementedError
def A_ ( self : Optional[Any] , a : Any , a : int , a : str , **a : Dict ) ->Optional[int]:
raise NotImplementedError
def A_ ( self : str , a : str ) ->Tuple:
raise NotImplementedError
def A_ ( self : Optional[Any] ) ->Optional[int]:
if not self.is_available():
raise RuntimeError(
f"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" )
@classmethod
def A_ ( cls : Union[str, Any] ) ->Any:
return f"""`pip install {cls.pip_package or cls.name}`"""
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "optuna"
@staticmethod
def A_ ( ) ->int:
return is_optuna_available()
def A_ ( self : List[str] , a : str , a : int , a : str , **a : Union[str, Any] ) ->Tuple:
return run_hp_search_optuna(a , a , a , **a )
def A_ ( self : List[Any] , a : List[Any] ) ->Tuple:
return default_hp_space_optuna(a )
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "ray"
snake_case_ = "'ray[tune]'"
@staticmethod
def A_ ( ) ->Dict:
return is_ray_available()
def A_ ( self : int , a : Any , a : int , a : str , **a : Union[str, Any] ) ->Optional[Any]:
return run_hp_search_ray(a , a , a , **a )
def A_ ( self : Tuple , a : Union[str, Any] ) ->Optional[int]:
return default_hp_space_ray(a )
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "sigopt"
@staticmethod
def A_ ( ) ->Any:
return is_sigopt_available()
def A_ ( self : Union[str, Any] , a : Tuple , a : int , a : str , **a : str ) ->Union[str, Any]:
return run_hp_search_sigopt(a , a , a , **a )
def A_ ( self : Tuple , a : List[Any] ) ->Any:
return default_hp_space_sigopt(a )
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "wandb"
@staticmethod
def A_ ( ) ->Union[str, Any]:
return is_wandb_available()
def A_ ( self : Dict , a : Tuple , a : int , a : str , **a : Optional[Any] ) ->Union[str, Any]:
return run_hp_search_wandb(a , a , a , **a )
def A_ ( self : List[str] , a : Union[str, Any] ) ->int:
return default_hp_space_wandb(a )
__lowercase :Tuple = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(_lowerCamelCase ) > 0:
SCREAMING_SNAKE_CASE__ : List[str] = available_backends[0].name
if len(_lowerCamelCase ) > 1:
logger.info(
f"""{len(_lowerCamelCase )} hyperparameter search backends available. Using {name} as the default.""" )
return name
raise RuntimeError(
"No hyperparameter search backend available.\n"
+ "\n".join(
f""" - To install {backend.name} run {backend.pip_install()}"""
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
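# The default-backend selection above in miniature: scan a registry and take
# the first backend whose availability probe passes (names are illustrative).
_backends = {"optuna": lambda: False, "ray": lambda: True, "wandb": lambda: True}
_available = [name for name, is_available in _backends.items() if is_available()]
assert (_available[0] if _available else None) == "ray"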
| 26 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : list[int] , _lowerCamelCase : int ):
'''simple docstring'''
if len(_lowerCamelCase ) < k or k < 0:
raise ValueError("Invalid Input" )
SCREAMING_SNAKE_CASE__ : int = sum(array[:k] )
for i in range(len(_lowerCamelCase ) - k ):
SCREAMING_SNAKE_CASE__ : str = current_sum - array[i] + array[i + k]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max(_lowerCamelCase , _lowerCamelCase )
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
__lowercase :List[str] = [randint(-1_000, 1_000) for i in range(100)]
__lowercase :Any = randint(0, 110)
print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}")
| 26 | 1 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 26 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : list[int | float] , _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
if len(_lowerCamelCase ) == 0:
raise ValueError("find_max() arg is an empty sequence" )
if (
left >= len(_lowerCamelCase )
or left < -len(_lowerCamelCase )
or right >= len(_lowerCamelCase )
or right < -len(_lowerCamelCase )
):
raise IndexError("list index out of range" )
if left == right:
return nums[left]
SCREAMING_SNAKE_CASE__ : Optional[int] = (left + right) >> 1 # the middle
SCREAMING_SNAKE_CASE__ : List[Any] = find_max(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # find max in range[left, mid]
SCREAMING_SNAKE_CASE__ : Optional[int] = find_max(_lowerCamelCase , mid + 1 , _lowerCamelCase ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
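# A stripped-down restatement of the recursion above with plain names: the
# range halves at each call, so the depth is O(log n) and every element is
# read exactly once.
def _find_max(nums, left, right):
    if left == right:
        return nums[left]
    mid = (left + right) >> 1
    return max(_find_max(nums, left, mid), _find_max(nums, mid + 1, right))

assert _find_max([3, -1, 7, 2, 7, 0], 0, 5) == 7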
| 26 | 1 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class _a :
"""simple docstring"""
def __init__( self : Tuple , a : Optional[int] , a : Optional[Any]=13 , a : List[Any]=10 , a : str=3 , a : Tuple=2 , a : List[str]=2 , a : Union[str, Any]=2 , a : Optional[int]=True , a : Optional[Any]=True , a : Optional[int]=32 , a : Tuple=5 , a : Any=4 , a : Dict=37 , a : Union[str, Any]="gelu" , a : Dict=0.1 , a : Dict=0.1 , a : int=10 , a : int=0.02 , a : Tuple=0.9 , a : Union[str, Any]=None , ) ->str:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE__ : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE__ : Optional[int] = image_size
SCREAMING_SNAKE_CASE__ : int = num_channels
SCREAMING_SNAKE_CASE__ : Dict = patch_size
SCREAMING_SNAKE_CASE__ : Any = tubelet_size
SCREAMING_SNAKE_CASE__ : str = num_frames
SCREAMING_SNAKE_CASE__ : Tuple = is_training
SCREAMING_SNAKE_CASE__ : Optional[int] = use_labels
SCREAMING_SNAKE_CASE__ : str = hidden_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Any = num_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : Dict = hidden_act
SCREAMING_SNAKE_CASE__ : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Tuple = initializer_range
SCREAMING_SNAKE_CASE__ : Tuple = mask_ratio
SCREAMING_SNAKE_CASE__ : List[str] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
SCREAMING_SNAKE_CASE__ : Optional[Any] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ : Tuple = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
SCREAMING_SNAKE_CASE__ : Tuple = int(mask_ratio * self.seq_length )
def A_ ( self : Any ) ->List[str]:
SCREAMING_SNAKE_CASE__ : str = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : Tuple = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def A_ ( self : str ) ->Optional[Any]:
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , )
def A_ ( self : Dict , a : Optional[Any] , a : Dict , a : str ) ->int:
SCREAMING_SNAKE_CASE__ : Tuple = VideoMAEModel(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self : str , a : Union[str, Any] , a : Tuple , a : Union[str, Any] ) ->str:
SCREAMING_SNAKE_CASE__ : List[Any] = VideoMAEForPreTraining(a )
model.to(a )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
SCREAMING_SNAKE_CASE__ : str = torch.ones((self.num_masks,) )
SCREAMING_SNAKE_CASE__ : Any = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
SCREAMING_SNAKE_CASE__ : Tuple = mask.expand(self.batch_size , -1 ).bool()
SCREAMING_SNAKE_CASE__ : Dict = model(a , a )
# model only returns predictions for masked patches
SCREAMING_SNAKE_CASE__ : Dict = mask.sum().item()
SCREAMING_SNAKE_CASE__ : Optional[Any] = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def A_ ( self : Optional[Any] ) ->int:
SCREAMING_SNAKE_CASE__ : int = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _a ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
snake_case_ = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def A_ ( self : Dict ) ->int:
SCREAMING_SNAKE_CASE__ : Tuple = VideoMAEModelTester(self )
SCREAMING_SNAKE_CASE__ : Optional[Any] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 )
def A_ ( self : Optional[int] , a : List[Any] , a : str , a : Dict=False ) ->Tuple:
SCREAMING_SNAKE_CASE__ : Any = copy.deepcopy(a )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
SCREAMING_SNAKE_CASE__ : str = torch.ones((self.model_tester.num_masks,) )
SCREAMING_SNAKE_CASE__ : List[str] = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
SCREAMING_SNAKE_CASE__ : List[str] = mask.expand(self.model_tester.batch_size , -1 ).bool()
SCREAMING_SNAKE_CASE__ : List[str] = bool_masked_pos.to(a )
if return_labels:
if model_class in [
*get_values(a ),
]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a )
return inputs_dict
def A_ ( self : Optional[int] ) ->List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason="VideoMAE does not use inputs_embeds" )
def A_ ( self : List[Any] ) ->Optional[int]:
pass
def A_ ( self : Optional[int] ) ->int:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : int = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def A_ ( self : int ) ->Any:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a )
SCREAMING_SNAKE_CASE__ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : str = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a )
def A_ ( self : Optional[Any] ) ->Any:
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def A_ ( self : Optional[Any] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a )
@slow
def A_ ( self : int ) ->Any:
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = VideoMAEModel.from_pretrained(a )
self.assertIsNotNone(a )
def A_ ( self : str ) ->Optional[Any]:
if not self.has_attentions:
pass
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Any = True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.seq_length - self.model_tester.num_masks
SCREAMING_SNAKE_CASE__ : Tuple = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
SCREAMING_SNAKE_CASE__ : int = True
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
SCREAMING_SNAKE_CASE__ : Dict = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple = model(**self._prepare_for_class(a , a ) )
SCREAMING_SNAKE_CASE__ : Dict = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE__ : int = True
SCREAMING_SNAKE_CASE__ : str = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**self._prepare_for_class(a , a ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = len(a )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE__ : List[str] = True
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : int = model(**self._prepare_for_class(a , a ) )
self.assertEqual(out_len + 1 , len(a ) )
SCREAMING_SNAKE_CASE__ : Tuple = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def A_ ( self : List[str] ) ->List[str]:
def check_hidden_states_output(a : Optional[int] , a : Optional[Any] , a : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = model(**self._prepare_for_class(a , a ) )
SCREAMING_SNAKE_CASE__ : List[str] = outputs.hidden_states
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(a ) , a )
SCREAMING_SNAKE_CASE__ : str = self.model_tester.seq_length - self.model_tester.num_masks
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : List[Any] = True
check_hidden_states_output(a , a , a )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A_ ( self : str ) ->int:
pass
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
SCREAMING_SNAKE_CASE__ : List[Any] = np.load(_lowerCamelCase )
return list(_lowerCamelCase )
@require_torch
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A_ ( self : Optional[Any] ) ->str:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def A_ ( self : Optional[Any] ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics" ).to(
a )
SCREAMING_SNAKE_CASE__ : List[str] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : str = prepare_video()
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processor(a , return_tensors="pt" ).to(a )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Any = model(**a )
# verify the logits
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , a )
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor([0.3669, -0.0688, -0.2421] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) )
@slow
def A_ ( self : Union[str, Any] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[Any] = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" ).to(a )
SCREAMING_SNAKE_CASE__ : List[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Optional[Any] = prepare_video()
SCREAMING_SNAKE_CASE__ : List[str] = image_processor(a , return_tensors="pt" ).to(a )
# add boolean mask, indicating which patches to mask
SCREAMING_SNAKE_CASE__ : Any = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
SCREAMING_SNAKE_CASE__ : str = torch.load(a )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] = model(**a )
# verify the logits
SCREAMING_SNAKE_CASE__ : List[str] = torch.Size([1, 14_08, 15_36] )
SCREAMING_SNAKE_CASE__ : str = torch.tensor(
[[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=a )
self.assertEqual(outputs.logits.shape , a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor([0.5142] , device=a )
self.assertTrue(torch.allclose(outputs.loss , a , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" , norm_pix_loss=a ).to(
a )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**a )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([0.6469] , device=a )
self.assertTrue(torch.allclose(outputs.loss , a , atol=1E-4 ) )
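# --- Hedged example (not part of the original tests) ---
# A minimal sketch of how the boolean mask downloaded from the Hub above could
# be generated locally for VideoMAEForPreTraining. The default seq_length and
# mask_ratio below are illustrative assumptions, not values read from the
# "MCG-NJU/videomae-base-short" checkpoint.
def _example_random_bool_masked_pos(seq_length: int = 15_68, mask_ratio: float = 0.9):
    import torch

    num_masked = int(mask_ratio * seq_length)
    noise = torch.rand(1, seq_length)  # one random score per patch
    shuffled = noise.argsort(dim=1)  # random permutation of patch indices
    bool_masked_pos = torch.zeros(1, seq_length, dtype=torch.bool)
    bool_masked_pos[0, shuffled[0, :num_masked]] = True  # mask the first num_masked indices
    return bool_masked_pos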
| 26 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowercase :str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : List[str] , a : Optional[int] , a : str , a : int=None , a : Optional[Any]=1 ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Dict = tokenizer
SCREAMING_SNAKE_CASE__ : Optional[int] = dataset
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(a ) if n_tasks is None else n_tasks
SCREAMING_SNAKE_CASE__ : Dict = n_copies
def __iter__( self : str ) ->Tuple:
SCREAMING_SNAKE_CASE__ : str = []
for task in range(self.n_tasks ):
# without strip(), the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
SCREAMING_SNAKE_CASE__ : int = self.tokenizer(a , padding=a , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Dict , a : int , a : int , a : Tuple ) ->Dict:
SCREAMING_SNAKE_CASE__ : Dict = start_length
SCREAMING_SNAKE_CASE__ : Any = eof_strings
SCREAMING_SNAKE_CASE__ : Any = tokenizer
def __call__( self : Any , a : Optional[int] , a : int , **a : Union[str, Any] ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
SCREAMING_SNAKE_CASE__ : int = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(a )
def UpperCAmelCase ( _lowerCamelCase : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = re.split("(%s)" % "|".join(_lowerCamelCase ) , _lowerCamelCase )
# last string should be ""
return "".join(string_list[:-2] )
def UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : str=20 , **_lowerCamelCase : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = batch["ids"].shape[-1]
SCREAMING_SNAKE_CASE__ : List[Any] = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
SCREAMING_SNAKE_CASE__ : Dict = batch["task_id"].repeat(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = accelerator.gather((generated_tokens, generated_tasks) )
SCREAMING_SNAKE_CASE__ : Dict = generated_tokens.cpu().numpy()
SCREAMING_SNAKE_CASE__ : Any = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = HfArgumentParser(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
SCREAMING_SNAKE_CASE__ : List[str] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
SCREAMING_SNAKE_CASE__ : str = "false"
if args.num_workers is None:
SCREAMING_SNAKE_CASE__ : Dict = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
SCREAMING_SNAKE_CASE__ : Dict = Accelerator()
set_seed(args.seed , device_specific=_lowerCamelCase )
# Load model and tokenizer
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.eos_token
SCREAMING_SNAKE_CASE__ : List[str] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
SCREAMING_SNAKE_CASE__ : List[Any] = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ),
}
# Load evaluation dataset and metric
SCREAMING_SNAKE_CASE__ : str = load_dataset("openai_humaneval" )
SCREAMING_SNAKE_CASE__ : Any = load_metric("code_eval" )
SCREAMING_SNAKE_CASE__ : Dict = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = args.n_samples // args.batch_size
SCREAMING_SNAKE_CASE__ : Dict = TokenizedDataset(_lowerCamelCase , human_eval["test"] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
# note: args.batch_size is actually num_return_sequences, not the DataLoader batch size (which is fixed at 1)
SCREAMING_SNAKE_CASE__ : Optional[int] = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
SCREAMING_SNAKE_CASE__ : int = code_eval_metric.compute(references=[""] , predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for task in tqdm(range(_lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : List[Any] = human_eval["test"][task]["test"]
SCREAMING_SNAKE_CASE__ : List[Any] = f"""check({human_eval['test'][task]['entry_point']})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , "w" ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
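# --- Hedged usage sketch (not part of the original script) ---
# HumanEvalArguments lives in the external `arguments` module, so the exact
# flag names below are assumptions inferred from the attributes read above
# (args.model_ckpt, args.n_samples, args.batch_size, args.output_file, ...):
#
#   accelerate launch human_eval.py \
#       --model_ckpt codeparrot/codeparrot \
#       --do_sample True --temperature 0.2 --top_p 0.95 \
#       --n_samples 200 --batch_size 10 \
#       --HF_ALLOW_CODE_EVAL 1 --output_file eval_results.json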
| 26 | 1 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 26 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase :str = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Union[str, Any] = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
__lowercase :str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
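# Illustrative effect of the _LazyModule above (a sketch, assuming torch is
# installed): the heavy modeling file is only imported when one of its names
# is first accessed.
#
#   from transformers import UperNetConfig, UperNetForSemanticSegmentation
#   model = UperNetForSemanticSegmentation(UperNetConfig())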
| 26 | 1 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
__lowercase :Tuple = logging.get_logger(__name__)
def UpperCAmelCase ( _lowerCamelCase : bool , _lowerCamelCase : bool ):
'''simple docstring'''
def run_func(_lowerCamelCase : Optional[Any] ):
@wraps(_lowerCamelCase )
def run_in_eager_mode(*_lowerCamelCase : Optional[Any] , **_lowerCamelCase : List[str] ):
return func(*_lowerCamelCase , **_lowerCamelCase )
@wraps(_lowerCamelCase )
@tf.function(experimental_compile=_lowerCamelCase )
def run_in_graph_mode(*_lowerCamelCase : Optional[int] , **_lowerCamelCase : List[str] ):
return func(*_lowerCamelCase , **_lowerCamelCase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = random.Random()
SCREAMING_SNAKE_CASE__ : Dict = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(_lowerCamelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = 42
snake_case_ = 42
snake_case_ = "TensorFlow"
@property
def A_ ( self : Optional[Any] ) ->List[Any]:
return tf.__version__
def A_ ( self : Optional[Any] , a : str , a : int , a : int ) ->float:
# initialize GPU on separate process
SCREAMING_SNAKE_CASE__ : Dict = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
SCREAMING_SNAKE_CASE__ : Any = self._prepare_inference_func(a , a , a )
return self._measure_speed(_inference )
def A_ ( self : Optional[Any] , a : str , a : int , a : int ) ->float:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
SCREAMING_SNAKE_CASE__ : Any = self._prepare_train_func(a , a , a )
return self._measure_speed(_train )
def A_ ( self : List[Any] , a : str , a : int , a : int ) ->[Memory, Optional[MemorySummary]]:
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , a )
SCREAMING_SNAKE_CASE__ : Dict = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._prepare_inference_func(a , a , a )
return self._measure_memory(_inference )
def A_ ( self : Dict , a : str , a : int , a : int ) ->[Memory, Optional[MemorySummary]]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , a )
SCREAMING_SNAKE_CASE__ : List[str] = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
SCREAMING_SNAKE_CASE__ : Dict = self._prepare_train_func(a , a , a )
return self._measure_memory(_train )
def A_ ( self : Optional[Any] , a : str , a : int , a : int ) ->Callable[[], None]:
SCREAMING_SNAKE_CASE__ : List[str] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
SCREAMING_SNAKE_CASE__ : List[Any] = (
hasattr(a , "architectures" )
and isinstance(config.architectures , a )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
SCREAMING_SNAKE_CASE__ : Dict = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
SCREAMING_SNAKE_CASE__ : Optional[int] = __import__("transformers" , fromlist=[model_class] )
SCREAMING_SNAKE_CASE__ : Any = getattr(a , a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_cls(a )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
SCREAMING_SNAKE_CASE__ : str = TF_MODEL_MAPPING[config.__class__](a )
# encoder-decoder has vocab size saved differently
SCREAMING_SNAKE_CASE__ : str = config.vocab_size if hasattr(a , "vocab_size" ) else config.encoder.vocab_size
SCREAMING_SNAKE_CASE__ : List[str] = random_input_ids(a , a , a )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(a , decoder_input_ids=a , training=a )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(a , training=a )
SCREAMING_SNAKE_CASE__ : Tuple = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def A_ ( self : str , a : str , a : int , a : int ) ->Callable[[], None]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
SCREAMING_SNAKE_CASE__ : List[str] = (
hasattr(a , "architectures" )
and isinstance(config.architectures , a )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
SCREAMING_SNAKE_CASE__ : List[Any] = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
SCREAMING_SNAKE_CASE__ : Any = __import__("transformers" , fromlist=[model_class] )
SCREAMING_SNAKE_CASE__ : Any = getattr(a , a )
SCREAMING_SNAKE_CASE__ : Tuple = model_cls(a )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](a )
# encoder-decoder has vocab size saved differently
SCREAMING_SNAKE_CASE__ : List[str] = config.vocab_size if hasattr(a , "vocab_size" ) else config.encoder.vocab_size
SCREAMING_SNAKE_CASE__ : List[Any] = random_input_ids(a , a , a )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
SCREAMING_SNAKE_CASE__ : List[Any] = model(a , decoder_input_ids=a , labels=a , training=a )[0]
SCREAMING_SNAKE_CASE__ : Any = tf.gradients(a , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
SCREAMING_SNAKE_CASE__ : List[str] = model(a , labels=a , training=a )[0]
SCREAMING_SNAKE_CASE__ : Tuple = tf.gradients(a , model.trainable_variables )
return gradients
SCREAMING_SNAKE_CASE__ : Optional[int] = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def A_ ( self : Union[str, Any] , a : str ) ->float:
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run the model an extra 5 times to stabilize compilation for tpu
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(a , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
SCREAMING_SNAKE_CASE__ : List[Any] = timeit.repeat(
a , repeat=self.args.repeat , number=10 , )
return min(a ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
def A_ ( self : Optional[Any] , a : Callable[[], None] ) ->[Memory, MemorySummary]:
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
SCREAMING_SNAKE_CASE__ : int = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
SCREAMING_SNAKE_CASE__ : Tuple = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
SCREAMING_SNAKE_CASE__ : Dict = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
SCREAMING_SNAKE_CASE__ : Tuple = nvml.nvmlDeviceGetMemoryInfo(a )
SCREAMING_SNAKE_CASE__ : int = meminfo.used
SCREAMING_SNAKE_CASE__ : Optional[Any] = Memory(a )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
SCREAMING_SNAKE_CASE__ : str = None
else:
SCREAMING_SNAKE_CASE__ : List[Any] = measure_peak_memory_cpu(a )
SCREAMING_SNAKE_CASE__ : Any = Memory(a ) if isinstance(a , a ) else memory_bytes
if self.args.trace_memory_line_by_line:
SCREAMING_SNAKE_CASE__ : int = stop_memory_tracing(a )
if memory is None:
SCREAMING_SNAKE_CASE__ : Dict = summary.total
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
return "N/A", None
| 26 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : int , _lowerCamelCase : Optional[int] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = {}
if train_file is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [train_file]
if eval_file is not None:
SCREAMING_SNAKE_CASE__ : int = [eval_file]
if test_file is not None:
SCREAMING_SNAKE_CASE__ : int = [test_file]
SCREAMING_SNAKE_CASE__ : Optional[int] = datasets.load_dataset("csv" , data_files=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = list(ds[list(files.keys() )[0]].features.keys() )
SCREAMING_SNAKE_CASE__ : int = features_name.pop(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = list(set(ds[list(files.keys() )[0]][label_name] ) )
SCREAMING_SNAKE_CASE__ : List[str] = {label: i for i, label in enumerate(_lowerCamelCase )}
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ : Any = {}
if len(_lowerCamelCase ) == 1:
for k in files.keys():
SCREAMING_SNAKE_CASE__ : List[Any] = ds[k].map(
lambda _lowerCamelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" ) , batched=_lowerCamelCase , )
elif len(_lowerCamelCase ) == 2:
for k in files.keys():
SCREAMING_SNAKE_CASE__ : Any = ds[k].map(
lambda _lowerCamelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" , ) , batched=_lowerCamelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
SCREAMING_SNAKE_CASE__ : Tuple = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : List[Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
SCREAMING_SNAKE_CASE__ : int = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : Optional[int] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
SCREAMING_SNAKE_CASE__ : int = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : Optional[Any] = labelaid[ex[label_name]]
yield (d, label)
SCREAMING_SNAKE_CASE__ : Tuple = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
SCREAMING_SNAKE_CASE__ : Any = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
SCREAMING_SNAKE_CASE__ : Dict = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
SCREAMING_SNAKE_CASE__ : Dict = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
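# Illustrative input format for get_tfds above: each CSV needs a header row;
# every column except the label column is treated as a text feature (one or
# two feature columns are supported). Example contents (hypothetical):
#
#   sentence,label
#   "the movie was great",positive
#   "two hours I will never get back",negative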
__lowercase :List[Any] = logging.getLogger(__name__)
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(metadata={"help": "Which column contains the label"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the training file"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the development file"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the test file"} )
snake_case_ = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
snake_case_ = field(
default=lowercase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=_lowerCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
SCREAMING_SNAKE_CASE__ : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(_lowerCamelCase ) , labelaid=_lowerCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
SCREAMING_SNAKE_CASE__ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(_lowerCamelCase : EvalPrediction ) -> Dict:
SCREAMING_SNAKE_CASE__ : Dict = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
SCREAMING_SNAKE_CASE__ : str = TFTrainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=_lowerCamelCase , eval_dataset=_lowerCamelCase , compute_metrics=_lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE__ : Dict = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
SCREAMING_SNAKE_CASE__ : str = trainer.evaluate()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(_lowerCamelCase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
results.update(_lowerCamelCase )
return results
if __name__ == "__main__":
main()
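# --- Hedged invocation sketch (not part of the original script) ---
# Flag names follow the dataclass fields defined above; paths are placeholders:
#
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --label_column_id 0 \
#       --train_file train.csv --dev_file dev.csv --test_file test.csv \
#       --max_seq_length 128 \
#       --output_dir ./output --do_train --do_eval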
| 26 | 1 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class _a :
"""simple docstring"""
def __init__( self : Union[str, Any] , a : Optional[int] , a : str=13 , a : Tuple=7 , a : str=True , a : int=True , a : Any=True , a : List[str]=True , a : Dict=99 , a : Dict=[1, 1, 2] , a : Tuple=1 , a : Tuple=32 , a : str=4 , a : Optional[int]=8 , a : Optional[Any]=37 , a : str="gelu_new" , a : Dict=0.1 , a : str=0.1 , a : Optional[int]=0.0 , a : Any=5_12 , a : Optional[Any]=3 , a : List[Any]=0.02 , a : Optional[int]=3 , a : str=4 , a : Any=None , a : Tuple=False , ) ->Tuple:
SCREAMING_SNAKE_CASE__ : Optional[Any] = parent
SCREAMING_SNAKE_CASE__ : Tuple = batch_size
SCREAMING_SNAKE_CASE__ : int = seq_length
SCREAMING_SNAKE_CASE__ : List[Any] = is_training
SCREAMING_SNAKE_CASE__ : List[str] = use_input_mask
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_token_type_ids
SCREAMING_SNAKE_CASE__ : Optional[int] = use_labels
SCREAMING_SNAKE_CASE__ : Dict = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] = block_sizes
SCREAMING_SNAKE_CASE__ : int = num_decoder_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = d_model
SCREAMING_SNAKE_CASE__ : Union[str, Any] = n_head
SCREAMING_SNAKE_CASE__ : Any = d_head
SCREAMING_SNAKE_CASE__ : int = d_inner
SCREAMING_SNAKE_CASE__ : List[str] = hidden_act
SCREAMING_SNAKE_CASE__ : int = hidden_dropout
SCREAMING_SNAKE_CASE__ : Dict = attention_dropout
SCREAMING_SNAKE_CASE__ : Union[str, Any] = activation_dropout
SCREAMING_SNAKE_CASE__ : int = max_position_embeddings
SCREAMING_SNAKE_CASE__ : List[str] = type_vocab_size
SCREAMING_SNAKE_CASE__ : List[Any] = 2
SCREAMING_SNAKE_CASE__ : Optional[int] = num_labels
SCREAMING_SNAKE_CASE__ : Any = num_choices
SCREAMING_SNAKE_CASE__ : Dict = scope
SCREAMING_SNAKE_CASE__ : Optional[int] = initializer_std
# Used in the tests to check the size of the first attention layer
SCREAMING_SNAKE_CASE__ : Any = n_head
# Used in the tests to check the size of the first hidden state
SCREAMING_SNAKE_CASE__ : Tuple = self.d_model
# Used in the tests to check the number of output hidden states/attentions
SCREAMING_SNAKE_CASE__ : Optional[Any] = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
SCREAMING_SNAKE_CASE__ : Optional[int] = self.num_hidden_layers + 2
def A_ ( self : Any ) ->Tuple:
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : Tuple = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ : Any = None
SCREAMING_SNAKE_CASE__ : Any = None
SCREAMING_SNAKE_CASE__ : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : int = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def A_ ( self : Any , a : Union[str, Any] , a : Optional[int] , a : Tuple , a : Any , a : Tuple , a : Tuple , a : str , ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] = TFFunnelModel(config=a )
SCREAMING_SNAKE_CASE__ : Dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE__ : Optional[int] = model(a )
SCREAMING_SNAKE_CASE__ : int = [input_ids, input_mask]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
SCREAMING_SNAKE_CASE__ : str = False
SCREAMING_SNAKE_CASE__ : Tuple = TFFunnelModel(config=a )
SCREAMING_SNAKE_CASE__ : Dict = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
SCREAMING_SNAKE_CASE__ : Dict = TFFunnelModel(config=a )
SCREAMING_SNAKE_CASE__ : int = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def A_ ( self : Any , a : str , a : Any , a : Dict , a : str , a : Union[str, Any] , a : List[str] , a : List[Any] , ) ->str:
SCREAMING_SNAKE_CASE__ : str = TFFunnelBaseModel(config=a )
SCREAMING_SNAKE_CASE__ : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a )
SCREAMING_SNAKE_CASE__ : Tuple = [input_ids, input_mask]
SCREAMING_SNAKE_CASE__ : Tuple = model(a )
SCREAMING_SNAKE_CASE__ : List[Any] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
SCREAMING_SNAKE_CASE__ : int = False
SCREAMING_SNAKE_CASE__ : List[str] = TFFunnelBaseModel(config=a )
SCREAMING_SNAKE_CASE__ : Dict = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
SCREAMING_SNAKE_CASE__ : str = False
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TFFunnelBaseModel(config=a )
SCREAMING_SNAKE_CASE__ : List[str] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def A_ ( self : Any , a : str , a : Union[str, Any] , a : Union[str, Any] , a : int , a : Any , a : Optional[Any] , a : Tuple , ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : int = TFFunnelForPreTraining(config=a )
SCREAMING_SNAKE_CASE__ : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE__ : Tuple = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self : int , a : int , a : Optional[int] , a : Union[str, Any] , a : int , a : Optional[int] , a : Optional[Any] , a : Tuple , ) ->List[str]:
SCREAMING_SNAKE_CASE__ : str = TFFunnelForMaskedLM(config=a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE__ : List[str] = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self : Union[str, Any] , a : Dict , a : List[str] , a : int , a : Optional[Any] , a : Tuple , a : Any , a : Dict , ) ->int:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.num_labels
SCREAMING_SNAKE_CASE__ : Any = TFFunnelForSequenceClassification(config=a )
SCREAMING_SNAKE_CASE__ : str = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE__ : int = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self : Optional[Any] , a : Optional[Any] , a : str , a : int , a : int , a : Any , a : Optional[Any] , a : Union[str, Any] , ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Any = self.num_choices
SCREAMING_SNAKE_CASE__ : int = TFFunnelForMultipleChoice(config=a )
SCREAMING_SNAKE_CASE__ : List[Any] = tf.tile(tf.expand_dims(a , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE__ : List[str] = tf.tile(tf.expand_dims(a , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.tile(tf.expand_dims(a , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE__ : str = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE__ : Any = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A_ ( self : Optional[Any] , a : Any , a : Dict , a : Union[str, Any] , a : List[str] , a : Tuple , a : Optional[int] , a : Dict , ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE__ : Tuple = TFFunnelForTokenClassification(config=a )
SCREAMING_SNAKE_CASE__ : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE__ : List[Any] = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self : Union[str, Any] , a : str , a : Optional[Any] , a : Any , a : Union[str, Any] , a : int , a : str , a : Tuple , ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : List[Any] = TFFunnelForQuestionAnswering(config=a )
SCREAMING_SNAKE_CASE__ : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE__ : int = model(a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self : Optional[int] ) ->int:
SCREAMING_SNAKE_CASE__ : Dict = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE__ : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _a ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case_ = (
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case_ = False
snake_case_ = False
def A_ ( self : Any ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : str = TFFunnelModelTester(self )
SCREAMING_SNAKE_CASE__ : Any = ConfigTester(self , config_class=a )
def A_ ( self : Any ) ->str:
self.config_tester.run_common_tests()
def A_ ( self : int ) ->int:
SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def A_ ( self : Union[str, Any] ) ->Any:
SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a )
def A_ ( self : Optional[int] ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a )
def A_ ( self : Optional[int] ) ->Any:
SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a )
def A_ ( self : Tuple ) ->Any:
SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a )
@require_tf
class _a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
snake_case_ = False
snake_case_ = False
def A_ ( self : List[str] ) ->Any:
SCREAMING_SNAKE_CASE__ : List[Any] = TFFunnelModelTester(self , base=a )
SCREAMING_SNAKE_CASE__ : List[str] = ConfigTester(self , config_class=a )
def A_ ( self : Optional[Any] ) ->Union[str, Any]:
self.config_tester.run_common_tests()
def A_ ( self : Any ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*a )
def A_ ( self : Dict ) ->int:
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a )
def A_ ( self : Union[str, Any] ) ->str:
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*a )
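# --- Hedged smoke-test sketch (not part of the original test file) ---
# A tiny standalone check mirroring the tester defaults above; requires TF:
#
#   import tensorflow as tf
#   from transformers import FunnelConfig, TFFunnelModel
#   config = FunnelConfig(vocab_size=99, block_sizes=[1, 1, 2], num_decoder_layers=1,
#                         d_model=32, n_head=4, d_head=8, d_inner=37)
#   model = TFFunnelModel(config)
#   out = model(tf.ones((2, 7), dtype=tf.int32))
#   print(out.last_hidden_state.shape)  # expect (2, 7, 32)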
| 26 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase :int = logging.get_logger(__name__)
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["pixel_values"]
def __init__( self : int , a : bool = True , a : Optional[Dict[str, int]] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = True , a : Dict[str, int] = None , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : List[str] , ) ->None:
super().__init__(**a )
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_56}
SCREAMING_SNAKE_CASE__ : Any = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : List[Any] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size
SCREAMING_SNAKE_CASE__ : List[Any] = resample
SCREAMING_SNAKE_CASE__ : int = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[Any] = crop_size
SCREAMING_SNAKE_CASE__ : Any = do_rescale
SCREAMING_SNAKE_CASE__ : Any = rescale_factor
SCREAMING_SNAKE_CASE__ : int = do_normalize
SCREAMING_SNAKE_CASE__ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A_ ( self : Tuple , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BICUBIC , a : Optional[Union[str, ChannelDimension]] = None , **a : Optional[int] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(a , default_to_square=a )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = get_resize_output_image_size(a , size=size["shortest_edge"] , default_to_square=a )
return resize(a , size=a , resample=a , data_format=a , **a )
def A_ ( self : List[Any] , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : List[Any] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : Tuple = get_size_dict(a )
return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a )
def A_ ( self : Optional[int] , a : np.ndarray , a : float , a : Optional[Union[str, ChannelDimension]] = None , **a : Dict ) ->np.ndarray:
return rescale(a , scale=a , data_format=a , **a )
def A_ ( self : Union[str, Any] , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : Union[str, Any] , ) ->np.ndarray:
return normalize(a , mean=a , std=a , data_format=a , **a )
def A_ ( self : Tuple , a : ImageInput , a : Optional[bool] = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : Dict[str, int] = None , a : Optional[bool] = None , a : Optional[float] = None , a : Optional[bool] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[str, TensorType]] = None , a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **a : Any , ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : Tuple = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Union[str, Any] = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : List[str] = [to_numpy_array(a ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Tuple = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE__ : List[Any] = [self.center_crop(image=a , size=a ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : List[str] = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Dict = [self.normalize(image=a , mean=a , std=a ) for image in images]
SCREAMING_SNAKE_CASE__ : Dict = [to_channel_dimension_format(a , a ) for image in images]
SCREAMING_SNAKE_CASE__ : Optional[int] = {"pixel_values": images}
return BatchFeature(data=a , tensor_type=a )
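# --- Hedged usage sketch (not part of the original module) ---
# The class above is obfuscated as `_a`; substitute the real exported name.
# With the defaults, the shortest edge is resized to 256 and the result is
# center-cropped to 224x224:
#
#   import numpy as np
#   image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#   processor = _a()
#   batch = processor(image, return_tensors="np")
#   print(batch["pixel_values"].shape)  # expect (1, 3, 224, 224)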
| 26 | 1 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__lowercase :List[str] = {
"susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
"susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "ernie_m"
snake_case_ = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self : Any , a : int = 25_00_02 , a : int = 7_68 , a : int = 12 , a : int = 12 , a : int = 30_72 , a : str = "gelu" , a : float = 0.1 , a : float = 0.1 , a : int = 5_14 , a : float = 0.02 , a : int = 1 , a : float = 1E-05 , a : Optional[Any]=None , a : Any=False , a : int=0.0 , **a : Dict , ) ->Optional[int]:
super().__init__(pad_token_id=a , **a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE__ : str = hidden_size
SCREAMING_SNAKE_CASE__ : Any = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Any = intermediate_size
SCREAMING_SNAKE_CASE__ : Tuple = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : str = max_position_embeddings
SCREAMING_SNAKE_CASE__ : List[str] = initializer_range
SCREAMING_SNAKE_CASE__ : Tuple = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Dict = classifier_dropout
SCREAMING_SNAKE_CASE__ : Optional[Any] = is_decoder
SCREAMING_SNAKE_CASE__ : Dict = act_dropout
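# Illustrative use of the attribute_map above (class name obfuscated as `_a`,
# and the keyword names below assume the real, un-obfuscated signature):
# "dropout" is aliased to "classifier_dropout" and "num_classes" to
# "num_labels", so both spellings resolve to the same value:
#
#   config = _a(classifier_dropout=0.1)
#   assert config.dropout == config.classifier_dropout == 0.1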
| 26 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Dict ) ->List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def A_ ( self : Dict ) ->Tuple:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__ : List[Any] = controlnet_params
SCREAMING_SNAKE_CASE__ : Dict = "bird"
SCREAMING_SNAKE_CASE__ : List[Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe.prepare_image_inputs([canny_image] * num_samples )
SCREAMING_SNAKE_CASE__ : List[Any] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : int = jax.random.split(a , jax.device_count() )
SCREAMING_SNAKE_CASE__ : List[Any] = replicate(a )
SCREAMING_SNAKE_CASE__ : List[str] = shard(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shard(a )
SCREAMING_SNAKE_CASE__ : Dict = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : List[Any] = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def A_ ( self : List[Any] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__ : Optional[int] = controlnet_params
SCREAMING_SNAKE_CASE__ : Any = "Chef in the kitchen"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
SCREAMING_SNAKE_CASE__ : str = pipe.prepare_image_inputs([pose_image] * num_samples )
SCREAMING_SNAKE_CASE__ : Any = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : List[str] = jax.random.split(a , jax.device_count() )
SCREAMING_SNAKE_CASE__ : Optional[Any] = replicate(a )
SCREAMING_SNAKE_CASE__ : Tuple = shard(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shard(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : str = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
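# The replicate/shard calls above implement the standard pmap data-parallel
# recipe; condensed (illustrative):
#
#   params = replicate(params)                           # copy weights to every device
#   rng = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
#   prompt_ids, image = shard(prompt_ids), shard(image)  # split the batch across devices
#   images = pipe(..., prng_seed=rng, jit=True).images   # one shard per device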
| 26 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = StableDiffusionXLImgaImgPipeline
snake_case_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
snake_case_ = PipelineTesterMixin.required_optional_params - {"latents"}
snake_case_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
snake_case_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def A_ ( self : Union[str, Any] ) ->Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , attention_head_dim=(2, 4) , use_linear_projection=a , addition_embed_type="text_time" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
SCREAMING_SNAKE_CASE__ : str = EulerDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , steps_offset=1 , beta_schedule="scaled_linear" , timestep_spacing="leading" , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=32 , )
SCREAMING_SNAKE_CASE__ : Tuple = CLIPTextModel(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = CLIPTextModelWithProjection(a )
SCREAMING_SNAKE_CASE__ : str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=a )
SCREAMING_SNAKE_CASE__ : str = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_encoder_2": text_encoder_a,
"tokenizer_2": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def A_ ( self : Dict , a : Any , a : Union[str, Any]=0 ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(a ) ).to(a )
SCREAMING_SNAKE_CASE__ : Optional[int] = image / 2 + 0.5
if str(a ).startswith("mps" ):
SCREAMING_SNAKE_CASE__ : Tuple = torch.manual_seed(a )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Generator(device=a ).manual_seed(a )
SCREAMING_SNAKE_CASE__ : int = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"output_type": "numpy",
"strength": 0.75,
}
return inputs
def A_ ( self : Any ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : int = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : str = StableDiffusionXLImgaImgPipeline(**a )
SCREAMING_SNAKE_CASE__ : str = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_inputs(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sd_pipe(**a ).images
SCREAMING_SNAKE_CASE__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : List[Any] = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A_ ( self : Tuple ) ->List[Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def A_ ( self : Any ) ->Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def A_ ( self : Dict ) ->int:
pass
def A_ ( self : Optional[int] ) ->Dict:
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Optional[int] = StableDiffusionXLImgaImgPipeline(**a )
SCREAMING_SNAKE_CASE__ : str = sd_pipe.to(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
# forward without prompt embeds
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_dummy_inputs(a )
SCREAMING_SNAKE_CASE__ : Any = 3 * ["this is a negative prompt"]
SCREAMING_SNAKE_CASE__ : Dict = negative_prompt
SCREAMING_SNAKE_CASE__ : str = 3 * [inputs["prompt"]]
SCREAMING_SNAKE_CASE__ : Optional[int] = sd_pipe(**a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_inputs(a )
SCREAMING_SNAKE_CASE__ : List[Any] = 3 * ["this is a negative prompt"]
SCREAMING_SNAKE_CASE__ : List[str] = 3 * [inputs.pop("prompt" )]
        SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = sd_pipe.encode_prompt(a , negative_prompt=a )
SCREAMING_SNAKE_CASE__ : Tuple = sd_pipe(
**a , prompt_embeds=a , negative_prompt_embeds=a , pooled_prompt_embeds=a , negative_pooled_prompt_embeds=a , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Optional[int] ) ->Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : int , a : Tuple , a : Dict="cpu" , a : Tuple=torch.floataa , a : List[str]=0 ) ->Tuple:
SCREAMING_SNAKE_CASE__ : int = torch.Generator(device=a ).manual_seed(a )
SCREAMING_SNAKE_CASE__ : str = np.random.RandomState(a ).standard_normal((1, 4, 64, 64) )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.from_numpy(a ).to(device=a , dtype=a )
SCREAMING_SNAKE_CASE__ : Any = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def A_ ( self : Any ) ->Tuple:
SCREAMING_SNAKE_CASE__ : Dict = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base" )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE__ : List[str] = self.get_inputs(a )
SCREAMING_SNAKE_CASE__ : List[Any] = pipe(**a ).images
SCREAMING_SNAKE_CASE__ : str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array([0.4_9493, 0.4_7896, 0.4_0798, 0.5_4214, 0.5_3212, 0.4_8202, 0.4_7656, 0.4_6329, 0.4_8506] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
'''simple docstring'''
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )
def pytest_addoption(parser):
    '''simple docstring'''
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    '''simple docstring'''
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    '''simple docstring'''
    # pytest exits with code 5 when no tests are collected; treat that as success
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    """simple docstring"""

    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
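# Hedged usage sketch: with the monkeypatched checker above, a doctest can opt
# out of output comparison by tagging a line with the custom flag (the doctest
# below is illustrative, not taken from the test suite):
# >>> print(some_nondeterministic_value)  # doctest: +IGNORE_RESULT
# anything printed here is accepted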
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase :int = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Any = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Tuple = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Any = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
__lowercase :Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
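# Hedged sketch of the lazy-import idea behind _LazyModule (simplified; the real
# implementation also wires up submodules, __all__, and the module spec, so treat
# the class below as an illustration rather than the library's API):
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map every exported name to the submodule that defines it
        self._class_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, item: str):
        # import the defining submodule only on first attribute access
        module = importlib.import_module("." + self._class_to_module[item], self.__name__)
        return getattr(module, item)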
def solution(n: int = 1_000) -> int:
    '''simple docstring'''
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(f"{solution() = }")
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase :Dict = logging.get_logger(__name__)
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["pixel_values"]
def __init__( self : Dict , a : bool = True , a : Dict[str, int] = None , a : PILImageResampling = PILImageResampling.BICUBIC , a : bool = True , a : Dict[str, int] = None , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , a : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **a : int , ) ->None:
super().__init__(**a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else {"shortest_edge": 2_24}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : Dict = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
SCREAMING_SNAKE_CASE__ : int = get_size_dict(a , param_name="crop_size" )
SCREAMING_SNAKE_CASE__ : str = do_resize
SCREAMING_SNAKE_CASE__ : str = size
SCREAMING_SNAKE_CASE__ : Optional[Any] = resample
SCREAMING_SNAKE_CASE__ : int = do_center_crop
SCREAMING_SNAKE_CASE__ : Union[str, Any] = crop_size
SCREAMING_SNAKE_CASE__ : Dict = do_rescale
SCREAMING_SNAKE_CASE__ : Optional[int] = rescale_factor
SCREAMING_SNAKE_CASE__ : List[Any] = do_normalize
SCREAMING_SNAKE_CASE__ : str = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def A_ ( self : Optional[Any] , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BICUBIC , a : Optional[Union[str, ChannelDimension]] = None , **a : Any , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a , default_to_square=a )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
SCREAMING_SNAKE_CASE__ : List[Any] = int((2_56 / 2_24) * size["shortest_edge"] )
SCREAMING_SNAKE_CASE__ : Tuple = get_resize_output_image_size(a , size=a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : int = {"height": output_size[0], "width": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
a , size=(size_dict["height"], size_dict["width"]) , resample=a , data_format=a , **a )
def A_ ( self : List[Any] , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : Optional[Any] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : str = get_size_dict(a )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a )
def A_ ( self : Dict , a : np.ndarray , a : Union[int, float] , a : Optional[Union[str, ChannelDimension]] = None , **a : str , ) ->np.ndarray:
return rescale(a , scale=a , data_format=a , **a )
def A_ ( self : Union[str, Any] , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : Optional[int] , ) ->np.ndarray:
return normalize(a , mean=a , std=a , data_format=a , **a )
def A_ ( self : Any , a : ImageInput , a : Optional[bool] = None , a : Optional[Dict[str, int]] = None , a : PILImageResampling = None , a : Optional[bool] = None , a : Optional[Dict[str, int]] = None , a : Optional[bool] = None , a : Optional[float] = None , a : Optional[bool] = None , a : Optional[Union[float, Iterable[float]]] = None , a : Optional[Union[float, Iterable[float]]] = None , a : Optional[TensorType] = None , a : ChannelDimension = ChannelDimension.FIRST , **a : Tuple , ) ->BatchFeature:
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : str = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : Dict = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Any = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : int = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : str = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE__ : Optional[int] = get_size_dict(a , param_name="crop_size" )
SCREAMING_SNAKE_CASE__ : List[str] = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : List[Any] = [to_numpy_array(a ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.resize(a , a , a ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE__ : Tuple = [self.center_crop(a , a ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : List[str] = [self.rescale(a , a ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Dict = [self.normalize(a , a , a ) for image in images]
SCREAMING_SNAKE_CASE__ : List[Any] = [to_channel_dimension_format(a , a ) for image in images]
SCREAMING_SNAKE_CASE__ : Dict = {"pixel_values": images}
return BatchFeature(data=a , tensor_type=a )
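# Hedged sketch of the shortest-edge resize rule used in `resize` above: the
# requested shortest edge is first scaled by 256/224 (a crop-then-resize
# convention), and the other side keeps the aspect ratio. The helper name is
# illustrative, not part of the class.
def shortest_edge_size(height: int, width: int, shortest_edge: int = 2_24) -> tuple:
    target = int((2_56 / 2_24) * shortest_edge)
    scale = target / min(height, width)
    return round(height * scale), round(width * scale)


# shortest_edge_size(480, 640) -> (256, 341), which then feeds the 224x224 crop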
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    '''simple docstring'''
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
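    # Minimal usage sketch: slowsort sorts the list in place.
    example = [5, 2, 9, 1, 7]
    slowsort(example)
    assert example == [1, 2, 5, 7, 9]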
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    '''simple docstring'''
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"""{result:.9f}"""
if __name__ == "__main__":
print(solution(20))
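    # Sanity sketch (assumption: drawing all 70 balls guarantees every colour
    # appears, so the expected number of distinct colours is exactly 7).
    assert solution(70) == "7.000000000"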
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    '''simple docstring'''
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    '''simple docstring'''
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"""{num}/{den}""")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    '''simple docstring'''
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
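    # Sanity check: the four non-trivial two-digit digit-cancelling fractions are
    # 16/64, 19/95, 26/65 and 49/98; their product is 1/100, so the lowest-terms
    # denominator returned here is 100.
    assert solution(2) == 100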
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowercase :Tuple = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :List[Any] = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
__lowercase :Dict = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
__lowercase :Union[str, Any] = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
__lowercase :Union[str, Any] = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
__lowercase :int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def A_ ( self : Dict ) ->str:
SCREAMING_SNAKE_CASE__ : Any = pipeline(
task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" )
SCREAMING_SNAKE_CASE__ : Optional[int] = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : Optional[int] = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : int = audio_classifier(a , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}] , )
@unittest.skip("No models are available in TF" )
def A_ ( self : int ) ->Union[str, Any]:
pass
@slow
@require_torch
def A_ ( self : int ) ->str:
SCREAMING_SNAKE_CASE__ : List[str] = pipeline(
task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , )
# This is an audio of a dog
SCREAMING_SNAKE_CASE__ : int = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : str = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : List[Any] = audio_classifier(a , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
] , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
SCREAMING_SNAKE_CASE__ : int = audio_classifier(
[audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] , batch_size=5 )
self.assertEqual(
nested_simplify(a ) , [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
@unittest.skip("No models are available in TF" )
def A_ ( self : Optional[int] ) ->Union[str, Any]:
pass
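# Hedged sketch of the scoring idea the pipeline above relies on (illustrative
# only; the real CLAP model computes learned audio and text embeddings):
import numpy as np


def zero_shot_scores(audio_emb: np.ndarray, text_embs: np.ndarray) -> np.ndarray:
    # cosine similarity between the audio embedding and each candidate-label
    # embedding, turned into a probability distribution with a softmax
    a = audio_emb / np.linalg.norm(audio_emb)
    t = text_embs / np.linalg.norm(text_embs, axis=1, keepdims=True)
    logits = t @ a
    exp = np.exp(logits - logits.max())
    return exp / exp.sum()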
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
__lowercase :Any = datasets.utils.logging.get_logger(__name__)
__lowercase :Tuple = ["names", "prefix"]
__lowercase :str = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
__lowercase :List[Any] = ["encoding_errors", "on_bad_lines"]
__lowercase :List[Any] = ["date_format"]
@dataclass
class _a ( datasets.BuilderConfig ):
"""simple docstring"""
snake_case_ = ","
snake_case_ = None
snake_case_ = "infer"
snake_case_ = None
snake_case_ = None
snake_case_ = None
snake_case_ = None
snake_case_ = None
snake_case_ = True
snake_case_ = None
snake_case_ = None
snake_case_ = None
snake_case_ = None
snake_case_ = False
snake_case_ = None
snake_case_ = None
snake_case_ = None
snake_case_ = True
snake_case_ = True
snake_case_ = False
snake_case_ = True
snake_case_ = None
snake_case_ = "."
snake_case_ = None
snake_case_ = '"'
snake_case_ = 0
snake_case_ = None
snake_case_ = None
snake_case_ = None
snake_case_ = None
snake_case_ = True
snake_case_ = True
snake_case_ = 0
snake_case_ = True
snake_case_ = False
snake_case_ = None
snake_case_ = 1_00_00
snake_case_ = None
snake_case_ = "strict"
snake_case_ = "error"
snake_case_ = None
def A_ ( self : Optional[int] ) ->int:
if self.delimiter is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.delimiter
if self.column_names is not None:
SCREAMING_SNAKE_CASE__ : int = self.column_names
@property
def A_ ( self : List[Any] ) ->Any:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , a ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class _a ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
snake_case_ = CsvConfig
def A_ ( self : Union[str, Any] ) ->int:
return datasets.DatasetInfo(features=self.config.features )
def A_ ( self : Any , a : List[str] ) ->int:
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a , (str, list, tuple) ):
SCREAMING_SNAKE_CASE__ : List[str] = data_files
if isinstance(a , a ):
SCREAMING_SNAKE_CASE__ : str = [files]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [dl_manager.iter_files(a ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
SCREAMING_SNAKE_CASE__ : str = []
for split_name, files in data_files.items():
if isinstance(a , a ):
SCREAMING_SNAKE_CASE__ : Any = [files]
SCREAMING_SNAKE_CASE__ : int = [dl_manager.iter_files(a ) for file in files]
splits.append(datasets.SplitGenerator(name=a , gen_kwargs={"files": files} ) )
return splits
def A_ ( self : Optional[Any] , a : pa.Table ) ->pa.Table:
if self.config.features is not None:
SCREAMING_SNAKE_CASE__ : Tuple = self.config.features.arrow_schema
if all(not require_storage_cast(a ) for feature in self.config.features.values() ):
# cheaper cast
SCREAMING_SNAKE_CASE__ : int = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=a )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
SCREAMING_SNAKE_CASE__ : int = table_cast(a , a )
return pa_table
def A_ ( self : List[Any] , a : str ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[str] = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
SCREAMING_SNAKE_CASE__ : Optional[int] = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(a ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(a ) ):
SCREAMING_SNAKE_CASE__ : List[Any] = pd.read_csv(a , iterator=a , dtype=a , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(a ):
SCREAMING_SNAKE_CASE__ : List[str] = pa.Table.from_pandas(a )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(a )
except ValueError as e:
logger.error(f"""Failed to read file '{file}' with error {type(a )}: {e}""" )
raise
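# Hedged usage sketch (the file path is illustrative): the builder above is what
# backs `load_dataset("csv", ...)`, so the keyword arguments mirror CsvConfig.
# from datasets import load_dataset
# dataset = load_dataset("csv", data_files={"train": "path/to/train.csv"}, sep=",")
# print(dataset["train"].features)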
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
__lowercase :List[str] = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
__lowercase :str = get_tests_dir("fixtures/vocab.json")
__lowercase :Optional[int] = get_tests_dir("fixtures")
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def A_ ( self : Optional[Any] ) ->int:
SCREAMING_SNAKE_CASE__ : Dict = 0
def A_ ( self : Any ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaConfig()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
# save in new folder
model_config.save_pretrained(a )
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : str = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : int ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(a , os.path.join(a , a ) )
copyfile(a , os.path.join(a , "vocab.json" ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[Any] ) ->Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Tuple = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Any = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in tokenizer
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : Optional[int] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[str] ) ->Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in feature extractor
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : List[Any] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->str:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : List[Any] = WavaVecaConfig(processor_class="Wav2Vec2Processor" )
model_config.save_pretrained(a )
# copy relevant files
copyfile(a , os.path.join(a , "vocab.json" ) )
# create emtpy sample processor
with open(os.path.join(a , a ) , "w" ) as f:
f.write("{}" )
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Optional[Any] ) ->Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
SCREAMING_SNAKE_CASE__ : Dict = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ : int = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a , use_fast=a )
SCREAMING_SNAKE_CASE__ : List[Any] = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def A_ ( self : Tuple ) ->List[Any]:
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a ):
AutoProcessor.register(a , a )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE__ : List[str] = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : int = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : List[Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Union[str, Any] ) ->int:
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = False
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = False
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "AutoFeatureExtractor"
snake_case_ = "AutoTokenizer"
snake_case_ = False
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# If remote code is not set, the default is to use local classes.
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Optional[Any] ) ->Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" )
def A_ ( self : Dict ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" )
@is_staging_test
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def A_ ( cls : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : int = TOKEN
HfFolder.save_token(a )
@classmethod
def A_ ( cls : List[str] ) ->Optional[int]:
try:
delete_repo(token=cls._token , repo_id="test-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-processor" )
except HTTPError:
pass
def A_ ( self : Dict ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , "test-processor" ) , push_to_hub=a , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , "test-processor-org" ) , push_to_hub=a , use_auth_token=self._token , organization="valid_org" , )
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : Any ) ->int:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE__ : Any = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : str = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token )
SCREAMING_SNAKE_CASE__ : str = Repository(a , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(a )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(a , "tokenizer_config.json" ) ) as f:
SCREAMING_SNAKE_CASE__ : str = json.load(a )
self.assertDictEqual(
tokenizer_config["auto_map"] , {
"AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(a , "custom_feature_extraction.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , "custom_tokenization.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , "custom_processing.py" ) ) )
repo.push_to_hub()
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=a )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
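# Hedged usage note (the repo id is the fixture exercised above): loading a
# processor whose code lives on the Hub requires explicitly opting in.
# from transformers import AutoProcessor
# processor = AutoProcessor.from_pretrained(
#     "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
# )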
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
__lowercase :str = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", "|"),
datarow=DataRow("", "|", "|"),
padding=1,
with_header_hide=None,
)
__lowercase :Tuple = []
__lowercase :Dict = []
__lowercase :Union[str, Any] = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
__lowercase :Dict = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
"emoji": True,
},
}
]
__lowercase :Union[str, Any] = 0
for log in Path().glob("*.log"):
__lowercase :Optional[int] = 0
with open(log, "r") as f:
for line in f:
__lowercase :Dict = json.loads(line)
if line.get("nodeid", "") != "":
__lowercase :Optional[Any] = line["nodeid"]
if line.get("duration", None) is not None:
__lowercase :Optional[Any] = f"{line['duration']:.4f}"
if line.get("outcome", "") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("_")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
__lowercase :Any = []
log.unlink()
__lowercase :Optional[Any] = ""
__lowercase :int = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
__lowercase :int = []
__lowercase :Tuple = {}
for test in failed_tests:
__lowercase :Optional[Any] = test[0].split("::")
__lowercase :str = data[0].split("/")[-1]
if data[0] not in filesafailed:
__lowercase :int = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
__lowercase :Union[str, Any] = [test[0] for test in failed_table]
__lowercase :Union[str, Any] = list(set(files))
# Count number of instances in failed_tests
__lowercase :Optional[Any] = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
__lowercase :Tuple = tabulate(
table,
headers=["Test Location", "Num Failed"],
tablefmt=hf_table_format,
stralign="right",
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_000:
__lowercase :Tuple = "Too many failed tests, please see the full report in the Action results."
__lowercase :Optional[int] = len(err) + 10
__lowercase :List[Any] = message[: 3_000 - offset] + f"\n...\n```\n{err}"
print(f"### {message}")
else:
__lowercase :Optional[int] = "No failed tests! 🤗"
print(f"## {message}")
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
__lowercase :Any = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
__lowercase :Union[str, Any] = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": message,
},
}
payload.append(md_report)
__lowercase :List[Any] = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*For more details:*",
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "Check Action results",
"emoji": True,
},
"url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
payload.append(action_button)
__lowercase :Dict = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
}
],
}
payload.append(date_report)
__lowercase :int = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
__lowercase :Tuple = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
__lowercase :Optional[int] = ""
for i, row in enumerate(test_failures):
if row[0] != test_class:
__lowercase :List[Any] = row[0]
else:
__lowercase :List[Any] = ""
__lowercase :List[str] = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
},
}
client.chat_postMessage(
channel="#accelerate-ci-daily",
thread_ts=ts,
blocks=[payload],
)
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["image_processor", "tokenizer"]
snake_case_ = "CLIPImageProcessor"
snake_case_ = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : Any , a : List[Any]=None , a : Any=None , **a : int ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , a , )
SCREAMING_SNAKE_CASE__ : List[Any] = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE__ : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(a , a )
def __call__( self : Tuple , a : Tuple=None , a : Union[str, Any]=None , a : List[str]=None , **a : Optional[Any] ) ->Optional[Any]:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
SCREAMING_SNAKE_CASE__ : str = self.tokenizer(a , return_tensors=a , **a )
if images is not None:
SCREAMING_SNAKE_CASE__ : int = self.image_processor(a , return_tensors=a , **a )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE__ : Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a ) , tensor_type=a )
def A_ ( self : Optional[int] , *a : Any , **a : List[str] ) ->Any:
return self.tokenizer.batch_decode(*a , **a )
def A_ ( self : Any , *a : Optional[int] , **a : Dict ) ->Any:
return self.tokenizer.decode(*a , **a )
@property
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A_ ( self : Optional[int] ) ->List[Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , a , )
return self.image_processor_class
@property
def A_ ( self : Dict ) ->str:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , a , )
return self.image_processor
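# Hedged usage sketch (checkpoint name assumed to be the standard public CLIP
# checkpoint; not part of the class above):
# from PIL import Image
# from transformers import CLIPProcessor
# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# batch = processor(text=["a photo of a cat"], images=Image.new("RGB", (224, 224)), return_tensors="pt")
# print(batch.keys())  # expected: input_ids, attention_mask, pixel_values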
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = BioGptTokenizer
snake_case_ = False
def A_ ( self : int ) ->Any:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE__ : List[str] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
SCREAMING_SNAKE_CASE__ : Dict = dict(zip(a , range(len(a ) ) ) )
SCREAMING_SNAKE_CASE__ : List[str] = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
SCREAMING_SNAKE_CASE__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" ) as fp:
fp.write(json.dumps(a ) )
with open(self.merges_file , "w" ) as fp:
fp.write("\n".join(a ) )
def A_ ( self : List[str] , a : Optional[int] ) ->Tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] = "lower newer"
SCREAMING_SNAKE_CASE__ : str = "lower newer"
return input_text, output_text
def A_ ( self : Optional[int] ) ->Tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] = BioGptTokenizer(self.vocab_file , self.merges_file )
SCREAMING_SNAKE_CASE__ : Optional[int] = "lower"
SCREAMING_SNAKE_CASE__ : Tuple = ["low", "er</w>"]
SCREAMING_SNAKE_CASE__ : Any = tokenizer.tokenize(a )
self.assertListEqual(a , a )
SCREAMING_SNAKE_CASE__ : Dict = tokens + ["<unk>"]
SCREAMING_SNAKE_CASE__ : Any = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
@slow
def A_ ( self : Dict ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : str = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.encode("sequence builders" , add_special_tokens=a )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode("multi-sequence build" , add_special_tokens=a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(a , a )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
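# Hedged sketch of the BPE segmentation the test above exercises (simplified:
# this applies merges in priority order and re-scans, rather than repeatedly
# picking the globally lowest-rank pair; for the toy vocabulary they coincide):
def toy_bpe(word: str, merges: list) -> list:
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    for a, b in merges:  # merges ordered by priority
        i = 0
        while i < len(symbols) - 1:
            if symbols[i] == a and symbols[i + 1] == b:
                symbols[i : i + 2] = [a + b]
            else:
                i += 1
    return symbols


# toy_bpe("lower", [("l", "o"), ("lo", "w"), ("e", "r</w>")]) == ["low", "er</w>"]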
import sys
from collections import defaultdict
class Heap:
    """simple docstring"""

    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    '''simple docstring'''
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
    # < --------- Prim's Algorithm --------- >
__lowercase :Union[str, Any] = int(input("Enter number of edges: ").strip())
__lowercase :Dict = defaultdict(list)
for _ in range(edges_number):
__lowercase :Any = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
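# Hedged usage sketch (the triangle graph below is illustrative, not from the
# original script): each undirected edge is stored once per endpoint as
# [neighbor, weight], which is exactly the shape the stdin loop above builds.
# For this graph the minimum spanning tree keeps the two cheapest edges,
# (0, 1) and (1, 2).
def _prim_example_input():
    example_graph = defaultdict(list)
    for u, v, w in [(0, 1, 1), (0, 2, 4), (1, 2, 2)]:
        example_graph[u].append([v, w])
        example_graph[v].append([u, w])
    return example_graph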
| 26 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : int , _lowerCamelCase : Optional[int] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = {}
if train_file is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [train_file]
if eval_file is not None:
SCREAMING_SNAKE_CASE__ : int = [eval_file]
if test_file is not None:
SCREAMING_SNAKE_CASE__ : int = [test_file]
SCREAMING_SNAKE_CASE__ : Optional[int] = datasets.load_dataset("csv" , data_files=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = list(ds[list(files.keys() )[0]].features.keys() )
SCREAMING_SNAKE_CASE__ : int = features_name.pop(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = list(set(ds[list(files.keys() )[0]][label_name] ) )
SCREAMING_SNAKE_CASE__ : List[str] = {label: i for i, label in enumerate(_lowerCamelCase )}
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ : Any = {}
if len(_lowerCamelCase ) == 1:
for k in files.keys():
SCREAMING_SNAKE_CASE__ : List[Any] = ds[k].map(
lambda _lowerCamelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" ) , batched=_lowerCamelCase , )
elif len(_lowerCamelCase ) == 2:
for k in files.keys():
SCREAMING_SNAKE_CASE__ : Any = ds[k].map(
lambda _lowerCamelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" , ) , batched=_lowerCamelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
SCREAMING_SNAKE_CASE__ : Tuple = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : List[Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
SCREAMING_SNAKE_CASE__ : int = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : Optional[int] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
SCREAMING_SNAKE_CASE__ : int = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : Optional[Any] = labelaid[ex[label_name]]
yield (d, label)
SCREAMING_SNAKE_CASE__ : Tuple = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
SCREAMING_SNAKE_CASE__ : Any = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
SCREAMING_SNAKE_CASE__ : Dict = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
SCREAMING_SNAKE_CASE__ : Dict = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
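# Schematic of the generator -> tf.data.Dataset hookup used three times above
# (a sketch: `tf.intaa` in this dump presumably stands for tf.int32, and the
# shapes mirror what batch_encode_plus emits per example).
def _dataset_from_generator(gen, input_names):
    return tf.data.Dataset.from_generator(
        gen,
        ({k: tf.int32 for k in input_names}, tf.int32),
        ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
    )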
__lowercase :List[Any] = logging.getLogger(__name__)
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(metadata={"help": "Which column contains the label"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the training file"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the development file"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the test file"} )
snake_case_ = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
snake_case_ = field(
default=lowercase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=_lowerCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
SCREAMING_SNAKE_CASE__ : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(_lowerCamelCase ) , labelaid=_lowerCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
SCREAMING_SNAKE_CASE__ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(_lowerCamelCase : EvalPrediction ) -> Dict:
SCREAMING_SNAKE_CASE__ : Dict = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
SCREAMING_SNAKE_CASE__ : str = TFTrainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=_lowerCamelCase , eval_dataset=_lowerCamelCase , compute_metrics=_lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE__ : Dict = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
SCREAMING_SNAKE_CASE__ : str = trainer.evaluate()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(_lowerCamelCase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
results.update(_lowerCamelCase )
return results
if __name__ == "__main__":
main()
| 26 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__lowercase :List[Any] = logging.get_logger(__name__)
__lowercase :Optional[int] = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "longformer"
def __init__( self : List[str] , a : Union[List[int], int] = 5_12 , a : int = 2 , a : int = 1 , a : int = 0 , a : int = 2 , a : int = 3_05_22 , a : int = 7_68 , a : int = 12 , a : int = 12 , a : int = 30_72 , a : str = "gelu" , a : float = 0.1 , a : float = 0.1 , a : int = 5_12 , a : int = 2 , a : float = 0.02 , a : float = 1E-12 , a : bool = False , **a : Dict , ) ->Tuple:
super().__init__(pad_token_id=a , **a )
SCREAMING_SNAKE_CASE__ : int = attention_window
SCREAMING_SNAKE_CASE__ : Any = sep_token_id
SCREAMING_SNAKE_CASE__ : str = bos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = eos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : str = type_vocab_size
SCREAMING_SNAKE_CASE__ : Any = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Any = onnx_export
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : int , a : "PretrainedConfig" , a : str = "default" , a : "List[PatchingSpec]" = None ) ->str:
super().__init__(a , a , a )
SCREAMING_SNAKE_CASE__ : Any = True
@property
def A_ ( self : int ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE__ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
def A_ ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = super().outputs
if self.task == "default":
SCREAMING_SNAKE_CASE__ : List[str] = {0: "batch"}
return outputs
@property
def A_ ( self : str ) ->float:
return 1E-4
@property
def A_ ( self : Any ) ->int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def A_ ( self : str , a : "PreTrainedTokenizerBase" , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE__ : Tuple = super().generate_dummy_inputs(
preprocessor=a , batch_size=a , seq_length=a , is_pair=a , framework=a )
import torch
        # for some reason, replacing this code with inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
SCREAMING_SNAKE_CASE__ : Any = torch.zeros_like(inputs["input_ids"] )
# make every second token global
SCREAMING_SNAKE_CASE__ : str = 1
return inputs
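# Minimal sketch of the "every second token global" pattern produced above
# (assumption: 1 marks a global-attention token, matching Longformer's
# convention for global_attention_mask):
def _example_global_attention_mask(batch_size=2, seq_len=8):
    import torch
    mask = torch.zeros(batch_size, seq_len, dtype=torch.int64)
    mask[:, ::2] = 1  # tokens 0, 2, 4, ... attend globally
    return mask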
| 26 | 1 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
__lowercase :List[Any] = 4
__lowercase :Any = 3
class _a ( lowercase__ ):
"""simple docstring"""
pass
def UpperCAmelCase ( _lowerCamelCase : List[str] ):
'''simple docstring'''
for shard in shards:
for i in range(_lowerCamelCase ):
yield {"i": i, "shard": shard}
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(os.environ["RANK"] )
SCREAMING_SNAKE_CASE__ : int = int(os.environ["WORLD_SIZE"] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ArgumentParser()
parser.add_argument("--streaming" , type=_lowerCamelCase )
parser.add_argument("--local_rank" , type=_lowerCamelCase )
parser.add_argument("--num_workers" , type=_lowerCamelCase , default=0 )
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
SCREAMING_SNAKE_CASE__ : Tuple = args.streaming
SCREAMING_SNAKE_CASE__ : Any = args.num_workers
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"shards": [f"""shard_{shard_idx}""" for shard_idx in range(_lowerCamelCase )]}
SCREAMING_SNAKE_CASE__ : Any = IterableDataset.from_generator(_lowerCamelCase , gen_kwargs=_lowerCamelCase )
if not streaming:
SCREAMING_SNAKE_CASE__ : Optional[int] = Dataset.from_list(list(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = split_dataset_by_node(_lowerCamelCase , rank=_lowerCamelCase , world_size=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = torch.utils.data.DataLoader(_lowerCamelCase , num_workers=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = NUM_SHARDS * NUM_ITEMS_PER_SHARD
SCREAMING_SNAKE_CASE__ : List[str] = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
SCREAMING_SNAKE_CASE__ : Dict = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""" )
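# The size bookkeeping checked above, as a standalone sketch: every rank gets
# floor(full / world) items, plus one extra while rank < full % world.
def _expected_local_size(full_size, rank, world_size):
    return full_size // world_size + int(rank < (full_size % world_size))
# e.g. full_size = 4 * 3 = 12 split over world_size = 5 -> sizes [3, 3, 2, 2, 2]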
if __name__ == "__main__":
main()
| 26 |
def UpperCAmelCase ( _lowerCamelCase : int = 4_000_000 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = [0, 1]
SCREAMING_SNAKE_CASE__ : List[Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
for j in range(len(_lowerCamelCase ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"{solution() = }")
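# Equivalent recurrence as a sketch: even Fibonacci numbers are every third
# term and satisfy E(k) = 4 * E(k-1) + E(k-2), so the even terms can be
# generated directly (same 4_000_000 bound assumed as above):
def _even_fib_sum(limit=4_000_000):
    total, a, b = 0, 2, 8  # 2 and 8 are the first two even Fibonacci numbers
    while a <= limit:
        total += a
        a, b = b, 4 * b + a
    return total  # 4613732 for the default limit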
| 26 | 1 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def UpperCAmelCase ( _lowerCamelCase : list , _lowerCamelCase : list , _lowerCamelCase : list , _lowerCamelCase : list , _lowerCamelCase : list ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = np.array([[1, item, train_mtch[i]] for i, item in enumerate(_lowerCamelCase )] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , _lowerCamelCase ) ) , x.transpose() ) , _lowerCamelCase )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
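# Worked sanity check of the normal-equation step above (illustrative numbers,
# not from the dataset): beta = (X^T X)^{-1} X^T y recovers y = 1 + 2x exactly.
def _normal_equation_example():
    x = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0]])  # bias column + feature
    y = np.array([1.0, 3.0, 5.0])
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return beta  # approximately [1.0, 2.0]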
def UpperCAmelCase ( _lowerCamelCase : list , _lowerCamelCase : list , _lowerCamelCase : list ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = (1, 2, 1)
SCREAMING_SNAKE_CASE__ : Optional[int] = (1, 1, 0, 7)
SCREAMING_SNAKE_CASE__ : Optional[int] = SARIMAX(
_lowerCamelCase , exog=_lowerCamelCase , order=_lowerCamelCase , seasonal_order=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : int = model.fit(disp=_lowerCamelCase , maxiter=600 , method="nm" )
SCREAMING_SNAKE_CASE__ : List[Any] = model_fit.predict(1 , len(_lowerCamelCase ) , exog=[test_match] )
return result[0]
def UpperCAmelCase ( _lowerCamelCase : list , _lowerCamelCase : list , _lowerCamelCase : list ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = SVR(kernel="rbf" , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = regressor.predict(_lowerCamelCase )
return y_pred[0]
def UpperCAmelCase ( _lowerCamelCase : list ):
'''simple docstring'''
train_user.sort()
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.percentile(_lowerCamelCase , 25 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.percentile(_lowerCamelCase , 75 )
SCREAMING_SNAKE_CASE__ : Dict = qa - qa
SCREAMING_SNAKE_CASE__ : List[Any] = qa - (iqr * 0.1)
return low_lim
def UpperCAmelCase ( _lowerCamelCase : list , _lowerCamelCase : float ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = 0
SCREAMING_SNAKE_CASE__ : str = 0
for i in list_vote:
if i > actual_result:
SCREAMING_SNAKE_CASE__ : Any = not_safe + 1
else:
if abs(abs(_lowerCamelCase ) - abs(_lowerCamelCase ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
__lowercase :str = [[18_231, 0.0, 1], [22_621, 1.0, 2], [15_675, 0.0, 3], [23_583, 1.0, 4]]
__lowercase :Any = pd.DataFrame(
data_input, columns=["total_user", "total_even", "days"]
)
__lowercase :str = Normalizer().fit_transform(data_input_df.values)
# split data
__lowercase :Tuple = normalize_df[:, 2].tolist()
__lowercase :List[Any] = normalize_df[:, 0].tolist()
__lowercase :str = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
__lowercase :Optional[Any] = normalize_df[:, [1, 2]].tolist()
__lowercase :List[Any] = x[: len(x) - 1]
__lowercase :Optional[int] = x[len(x) - 1 :]
# for linear regression & sarimax
__lowercase :List[Any] = total_date[: len(total_date) - 1]
__lowercase :List[Any] = total_user[: len(total_user) - 1]
__lowercase :Any = total_match[: len(total_match) - 1]
__lowercase :Optional[Any] = total_date[len(total_date) - 1 :]
__lowercase :Optional[Any] = total_user[len(total_user) - 1 :]
__lowercase :Optional[Any] = total_match[len(total_match) - 1 :]
# voting system with forecasting
__lowercase :Optional[Any] = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
__lowercase :Optional[Any] = "" if data_safety_checker(res_vote, tst_user) else "not "
print("Today's data is {not_str}safe.")
| 26 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , a : Any , a : bool = True , a : Dict[str, int] = None , a : int = 32 , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : bool = True , a : Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073] , a : Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711] , a : bool = True , a : Any=7 , a : str=30 , a : Dict=4_00 , a : Optional[int]=3 , ) ->int:
SCREAMING_SNAKE_CASE__ : int = parent
SCREAMING_SNAKE_CASE__ : Dict = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_88}
SCREAMING_SNAKE_CASE__ : List[Any] = size_divisor
SCREAMING_SNAKE_CASE__ : List[Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Tuple = rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean
SCREAMING_SNAKE_CASE__ : Dict = image_std
SCREAMING_SNAKE_CASE__ : List[str] = do_pad
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : int = num_channels
SCREAMING_SNAKE_CASE__ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_resolution
def A_ ( self : List[str] ) ->Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def A_ ( self : int , a : Optional[int] , a : Union[str, Any]=False ) ->Optional[Any]:
if not batched:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE__ : Dict = image_inputs[0]
if isinstance(a , Image.Image ):
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = image.size
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = image.shape[1], image.shape[2]
SCREAMING_SNAKE_CASE__ : Any = size / min(a , a )
if h < w:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = size, scale * w
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = scale * h, size
SCREAMING_SNAKE_CASE__ : List[Any] = int((13_33 / 8_00) * size )
if max(a , a ) > max_size:
SCREAMING_SNAKE_CASE__ : List[Any] = max_size / max(a , a )
SCREAMING_SNAKE_CASE__ : int = newh * scale
SCREAMING_SNAKE_CASE__ : Optional[int] = neww * scale
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = int(newh + 0.5 ), int(neww + 0.5 )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
SCREAMING_SNAKE_CASE__ : List[Any] = []
for image in image_inputs:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[0] )[0]
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = BridgeTowerImageProcessor if is_vision_available() else None
def A_ ( self : List[Any] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Any = BridgeTowerImageProcessingTester(self )
@property
def A_ ( self : Optional[int] ) ->Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self : Tuple ) ->str:
SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "image_mean" ) )
self.assertTrue(hasattr(a , "image_std" ) )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "do_resize" ) )
self.assertTrue(hasattr(a , "size" ) )
self.assertTrue(hasattr(a , "size_divisor" ) )
def A_ ( self : List[Any] ) ->List[Any]:
pass
def A_ ( self : Tuple ) ->Optional[Any]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : int = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : Optional[int] ) ->Any:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : str ) ->Optional[int]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Any = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
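# Standalone recomputation of the resize rule the expectations above encode
# (a sketch of the same arithmetic: scale so the short side hits
# `shortest_edge`, cap the long side at 1333/800 * size, then floor both to a
# multiple of `size_divisor`):
def _expected_resize(h, w, size=288, size_divisor=32):
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    max_size = int((1333 / 800) * size)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor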
| 26 | 1 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class _a :
"""simple docstring"""
def __init__( self : Any , a : List[Any] , a : Any=13 , a : Dict=7 , a : int=True , a : List[str]=True , a : Union[str, Any]=False , a : Union[str, Any]=True , a : List[Any]=99 , a : Dict=32 , a : List[Any]=5 , a : int=4 , a : Tuple=37 , a : Any="gelu" , a : Dict=0.1 , a : Tuple=0.1 , a : Union[str, Any]=5_12 , a : List[str]=16 , a : Any=2 , a : List[str]=0.02 , a : Any=3 , a : int=4 , a : List[str]=None , ) ->Dict:
SCREAMING_SNAKE_CASE__ : List[str] = parent
SCREAMING_SNAKE_CASE__ : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = seq_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = is_training
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_input_mask
SCREAMING_SNAKE_CASE__ : Optional[int] = use_token_type_ids
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE__ : Tuple = vocab_size
SCREAMING_SNAKE_CASE__ : Any = hidden_size
SCREAMING_SNAKE_CASE__ : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Any = num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple = intermediate_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : str = type_vocab_size
SCREAMING_SNAKE_CASE__ : str = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : str = initializer_range
SCREAMING_SNAKE_CASE__ : Dict = num_labels
SCREAMING_SNAKE_CASE__ : Tuple = num_choices
SCREAMING_SNAKE_CASE__ : List[Any] = scope
def A_ ( self : List[Any] ) ->Any:
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : int = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[int] = None
SCREAMING_SNAKE_CASE__ : Tuple = None
SCREAMING_SNAKE_CASE__ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A_ ( self : Union[str, Any] ) ->Any:
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
def A_ ( self : List[Any] , a : str , a : int , a : List[str] , a : Optional[Any] , a : str , a : List[str] , a : Optional[Any] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = LlamaModel(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict = model(a , attention_mask=a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self : Optional[Any] , a : List[Any] , a : List[str] , a : Union[str, Any] , a : Optional[int] , a : int , a : Dict , a : Union[str, Any] , a : str , a : Optional[Any] , ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Any = True
SCREAMING_SNAKE_CASE__ : Any = LlamaModel(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , )
SCREAMING_SNAKE_CASE__ : int = model(
a , attention_mask=a , encoder_hidden_states=a , )
SCREAMING_SNAKE_CASE__ : int = model(a , attention_mask=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self : List[Any] , a : Union[str, Any] , a : Union[str, Any] , a : Optional[Any] , a : List[Any] , a : Tuple , a : List[Any] , a : Any , a : Optional[Any] , a : Optional[int] , ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : int = LlamaForCausalLM(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE__ : str = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self : str , a : str , a : List[Any] , a : Tuple , a : Optional[int] , a : Dict , a : Optional[int] , a : Dict , a : Optional[int] , a : Dict , ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : List[Any] = True
SCREAMING_SNAKE_CASE__ : List[Any] = True
SCREAMING_SNAKE_CASE__ : List[str] = LlamaForCausalLM(config=a )
model.to(a )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE__ : int = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , use_cache=a , )
SCREAMING_SNAKE_CASE__ : List[str] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
SCREAMING_SNAKE_CASE__ : Any = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , output_hidden_states=a , )["hidden_states"][0]
SCREAMING_SNAKE_CASE__ : Optional[int] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , past_key_values=a , output_hidden_states=a , )["hidden_states"][0]
# select random slice
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1E-3 ) )
def A_ ( self : List[str] ) ->Tuple:
SCREAMING_SNAKE_CASE__ : int = self.prepare_config_and_inputs()
        (
            SCREAMING_SNAKE_CASE__,
            SCREAMING_SNAKE_CASE__,
            SCREAMING_SNAKE_CASE__,
            SCREAMING_SNAKE_CASE__,
            SCREAMING_SNAKE_CASE__,
            SCREAMING_SNAKE_CASE__,
            SCREAMING_SNAKE_CASE__,
        ) : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE__ : Tuple = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _a ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
snake_case_ = (LlamaForCausalLM,) if is_torch_available() else ()
snake_case_ = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
def A_ ( self : Union[str, Any] ) ->Tuple:
SCREAMING_SNAKE_CASE__ : int = LlamaModelTester(self )
SCREAMING_SNAKE_CASE__ : str = ConfigTester(self , config_class=a , hidden_size=37 )
def A_ ( self : Union[str, Any] ) ->List[str]:
self.config_tester.run_common_tests()
def A_ ( self : Union[str, Any] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def A_ ( self : List[Any] ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE__ : Optional[int] = type
self.model_tester.create_and_check_model(*a )
def A_ ( self : List[str] ) ->Tuple:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Tuple = 3
SCREAMING_SNAKE_CASE__ : List[str] = input_dict["input_ids"]
SCREAMING_SNAKE_CASE__ : int = input_ids.ne(1 ).to(a )
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : List[Any] = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A_ ( self : str ) ->int:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Dict = 3
SCREAMING_SNAKE_CASE__ : Optional[int] = "single_label_classification"
SCREAMING_SNAKE_CASE__ : List[Any] = input_dict["input_ids"]
SCREAMING_SNAKE_CASE__ : Optional[int] = input_ids.ne(1 ).to(a )
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A_ ( self : Tuple ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 3
SCREAMING_SNAKE_CASE__ : Dict = "multi_label_classification"
SCREAMING_SNAKE_CASE__ : List[str] = input_dict["input_ids"]
SCREAMING_SNAKE_CASE__ : Optional[Any] = input_ids.ne(1 ).to(a )
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE__ : Tuple = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("LLaMA buffers include complex numbers, which breaks this test" )
def A_ ( self : Union[str, Any] ) ->Dict:
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def A_ ( self : str , a : Optional[int] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor([1, 10] , config.vocab_size )
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE__ : Optional[Any] = LlamaModel(a )
original_model.to(a )
original_model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = original_model(a ).last_hidden_state
SCREAMING_SNAKE_CASE__ : Union[str, Any] = original_model(a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE__ : str = {"type": scaling_type, "factor": 10.0}
SCREAMING_SNAKE_CASE__ : int = LlamaModel(a )
scaled_model.to(a )
scaled_model.eval()
SCREAMING_SNAKE_CASE__ : Tuple = scaled_model(a ).last_hidden_state
SCREAMING_SNAKE_CASE__ : Any = scaled_model(a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(a , a , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(a , a , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(a , a , atol=1E-5 ) )
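# Sketch of what the two scaling types exercised above do to positions
# (schematic, not the transformers implementation): "linear" divides every
# position index by the factor, while "dynamic" leaves the embeddings
# unchanged until the sequence outgrows the trained maximum, which is why the
# short-input outputs match for "dynamic" but not for "linear".
def _linear_rope_positions(seq_len, factor=10.0):
    return [t / factor for t in range(seq_len)]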
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def A_ ( self : List[Any] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
SCREAMING_SNAKE_CASE__ : List[Any] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto" )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE__ : str = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def A_ ( self : Optional[int] ) ->Any:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
SCREAMING_SNAKE_CASE__ : int = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto" )
SCREAMING_SNAKE_CASE__ : List[Any] = model(torch.tensor(a ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE__ : Any = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def A_ ( self : str ) ->Any:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
SCREAMING_SNAKE_CASE__ : List[str] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto" )
SCREAMING_SNAKE_CASE__ : List[str] = model(torch.tensor(a ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE__ : Any = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test" )
@slow
def A_ ( self : Tuple ) ->Any:
SCREAMING_SNAKE_CASE__ : Any = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
SCREAMING_SNAKE_CASE__ : str = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto" )
SCREAMING_SNAKE_CASE__ : Tuple = model(torch.tensor(a ) )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# fmt: off
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Model is curently gated" )
@slow
def A_ ( self : List[str] ) ->int:
SCREAMING_SNAKE_CASE__ : int = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
SCREAMING_SNAKE_CASE__ : Tuple = "Simply put, the theory of relativity states that "
SCREAMING_SNAKE_CASE__ : Optional[int] = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" )
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.encode(a , return_tensors="pt" )
SCREAMING_SNAKE_CASE__ : List[Any] = LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=a )
# greedy generation outputs
SCREAMING_SNAKE_CASE__ : Optional[Any] = model.generate(a , max_new_tokens=64 , top_p=a , temperature=1 , do_sample=a )
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.decode(generated_ids[0] , skip_special_tokens=a )
self.assertEqual(a , a )
| 26 |
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
SCREAMING_SNAKE_CASE__ : List[str] = [
2_047,
1_373_653,
25_326_001,
3_215_031_751,
2_152_302_898_747,
3_474_749_660_383,
341_550_071_728_321,
1,
3_825_123_056_546_413_051,
1,
1,
318_665_857_834_031_151_167_461,
3_317_044_064_679_887_385_961_981,
]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(_lowerCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
SCREAMING_SNAKE_CASE__ : Dict = primes[:idx]
break
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
SCREAMING_SNAKE_CASE__ : str = False
for r in range(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = pow(_lowerCamelCase , d * 2**r , _lowerCamelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
SCREAMING_SNAKE_CASE__ : str = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def UpperCAmelCase ( ):
'''simple docstring'''
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838_201 )
assert miller_rabin(838_207 )
# 1_373_653
assert not miller_rabin(17_316_001 )
assert miller_rabin(17_316_017 )
# 25_326_001
assert not miller_rabin(3_078_386_641 )
assert miller_rabin(3_078_386_653 )
# 3_215_031_751
assert not miller_rabin(1_713_045_574_801 )
assert miller_rabin(1_713_045_574_819 )
# 2_152_302_898_747
assert not miller_rabin(2_779_799_728_307 )
assert miller_rabin(2_779_799_728_327 )
# 3_474_749_660_383
assert not miller_rabin(113_850_023_909_441 )
assert miller_rabin(113_850_023_909_527 )
# 341_550_071_728_321
assert not miller_rabin(1_275_041_018_848_804_351 )
assert miller_rabin(1_275_041_018_848_804_391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79_666_464_458_507_787_791_867 )
assert miller_rabin(79_666_464_458_507_787_791_951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552_840_677_446_647_897_660_333 )
assert miller_rabin(552_840_677_446_647_897_660_359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
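# Worked decomposition behind the witness loop above: n - 1 = d * 2**s with d
# odd. For the first test value, 561 - 1 = 560 = 35 * 2**4.
def _decompose(n):
    d, s = n - 1, 0
    while d % 2 == 0:
        d //= 2
        s += 1
    return d, s
# _decompose(561) -> (35, 4)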
| 26 | 1 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = (EulerDiscreteScheduler,)
snake_case_ = 10
def A_ ( self : Union[str, Any] , **a : Union[str, Any] ) ->int:
SCREAMING_SNAKE_CASE__ : List[str] = {
"num_train_timesteps": 11_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**a )
return config
def A_ ( self : List[str] ) ->Any:
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=a )
def A_ ( self : List[str] ) ->Tuple:
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=a , beta_end=a )
def A_ ( self : str ) ->Union[str, Any]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=a )
def A_ ( self : Optional[int] ) ->Tuple:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a )
def A_ ( self : Union[str, Any] ) ->Tuple:
SCREAMING_SNAKE_CASE__ : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Tuple = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : List[str] = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE__ : int = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE__ : Optional[Any] = sample.to(a )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE__ : Tuple = scheduler.scale_model_input(a , a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a , a )
SCREAMING_SNAKE_CASE__ : Optional[int] = scheduler.step(a , a , a , generator=a )
SCREAMING_SNAKE_CASE__ : str = output.prev_sample
SCREAMING_SNAKE_CASE__ : Any = torch.sum(torch.abs(a ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def A_ ( self : List[Any] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : str = self.get_scheduler_config(prediction_type="v_prediction" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple = self.dummy_model()
SCREAMING_SNAKE_CASE__ : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE__ : int = sample.to(a )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE__ : Any = scheduler.scale_model_input(a , a )
SCREAMING_SNAKE_CASE__ : str = model(a , a )
SCREAMING_SNAKE_CASE__ : Dict = scheduler.step(a , a , a , generator=a )
SCREAMING_SNAKE_CASE__ : Any = output.prev_sample
SCREAMING_SNAKE_CASE__ : List[Any] = torch.sum(torch.abs(a ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 0.0002 ) < 1E-2
assert abs(result_mean.item() - 2.2676E-06 ) < 1E-3
def A_ ( self : int ) ->str:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Tuple = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : Any = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps , device=a )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = self.dummy_model()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
SCREAMING_SNAKE_CASE__ : Tuple = sample.to(a )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE__ : str = scheduler.scale_model_input(a , a )
SCREAMING_SNAKE_CASE__ : Any = model(a , a )
SCREAMING_SNAKE_CASE__ : int = scheduler.step(a , a , a , generator=a )
SCREAMING_SNAKE_CASE__ : Optional[int] = output.prev_sample
SCREAMING_SNAKE_CASE__ : str = torch.sum(torch.abs(a ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def A_ ( self : List[str] ) ->Any:
SCREAMING_SNAKE_CASE__ : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Tuple = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : List[str] = scheduler_class(**a , use_karras_sigmas=a )
scheduler.set_timesteps(self.num_inference_steps , device=a )
SCREAMING_SNAKE_CASE__ : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : int = self.dummy_model()
SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
SCREAMING_SNAKE_CASE__ : Tuple = sample.to(a )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE__ : Optional[Any] = scheduler.scale_model_input(a , a )
SCREAMING_SNAKE_CASE__ : List[Any] = model(a , a )
SCREAMING_SNAKE_CASE__ : Dict = scheduler.step(a , a , a , generator=a )
SCREAMING_SNAKE_CASE__ : List[Any] = output.prev_sample
SCREAMING_SNAKE_CASE__ : Any = torch.sum(torch.abs(a ) )
SCREAMING_SNAKE_CASE__ : int = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1E-2
assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1E-3
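# The loop pattern every test above repeats, factored out as a sketch
# (assumption: a diffusers-style scheduler and a `model(sample, t)` callable):
def _denoise(model, scheduler, sample, generator=None):
    sample = sample * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = model(model_input, t)
        sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample
    return sample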
| 26 |
import numpy
class TwoHiddenLayerNeuralNetwork:
    """Feed-forward neural network with two hidden layers, trained by backpropagation."""

    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        # Input values provided for training the model.
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self) -> numpy.ndarray:
        # layer_between_input_and_first_hidden_layer is the layer connecting
        # the input nodes with the first hidden set of nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the predictions are to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Applies the sigmoid function element-wise."""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of sigmoid, expressed in terms of an already-activated value."""
    return (value) * (1 - (value))
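# Quick numeric sanity check for the identity used above: if s = sigmoid(x), then
# d(sigmoid)/dx = s * (1 - s). Verified here against a forward finite difference.
def _check_sigmoid_derivative() -> None:
    xs = numpy.linspace(-3, 3, 7)
    s = sigmoid(xs)
    eps = 1e-6
    finite_diff = (sigmoid(xs + eps) - s) / eps
    assert numpy.allclose(sigmoid_derivative(s), finite_diff, atol=1e-4)


_check_sigmoid_derivative()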
def example() -> int:
    """Trains the network on the truth table below and predicts one input."""
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)

    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
| 26 | 1 |
INSTALL_CONTENT = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 26 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowercase :Tuple = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__lowercase :str = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__lowercase :List[Any] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    """Corpus-level Google BLEU (GLEU) metric wrapping the NLTK implementation."""

    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        references: List[List[List[str]]],
        predictions: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
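# For reference, the underlying NLTK scorer the metric wraps, called directly on
# tiny tokenised toy data (sentences below are illustrative only):
def _gleu_smoke_test() -> float:
    return gleu_score.corpus_gleu(
        list_of_references=[[["the", "cat", "sat", "down"]]],
        hypotheses=[["the", "cat", "sat"]],
        min_len=1,
        max_len=4,
    )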
| 26 | 1 |
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


# Visualizing the Polynomial Regression results
def viz_polynomial():
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Polynomial Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
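# What PolynomialFeatures(degree=4) feeds to the linear model: each position level x
# becomes the row [1, x, x**2, x**3, x**4]. A degree-2 illustration on toy inputs:
demo_rows = PolynomialFeatures(degree=2).fit_transform([[2.0], [3.0]])
print(demo_rows)  # [[1. 2. 4.]
                  #  [1. 3. 9.]]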
| 26 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__lowercase :List[Any] = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    """Checks the installed version of `pkg` against the pinned requirement in the deps table."""
    require_version(deps[pkg], hint)
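# Usage sketch: `require_version` raises when the installed package does not satisfy
# the requirement string, appending the optional hint. The requirement shown here is
# illustrative, not taken from the deps table above.
def _require_numpy_example() -> None:
    require_version("numpy>=1.17", "Try: pip install -U numpy")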
| 26 | 1 |
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort(sequence: list) -> list:
    """Sorts a sequence of 0s, 1s and 2s in place in one pass (three-way partition)."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
return sequence
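# One pass, O(n) time and O(1) extra space; a quick self-check of the 3-way partition:
assert dutch_national_flag_sort([2, 0, 1, 2, 0]) == [0, 0, 1, 2, 2]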
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
| 26 |
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Returns the maximum sum of any k consecutive elements of ``array``."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
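# Sliding-window invariant: each step drops array[i] and adds array[i + k], so every
# window sum after the first costs O(1). Worked check (the best window is 4+2+10+23):
assert max_sum_in_array([1, 4, 2, 10, 23, 3, 1, 0, 20], 4) == 39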
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1_000, 1_000) for i in range(100)]
    k = randint(0, 110)
print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}")
| 26 | 1 |
from __future__ import annotations
from collections.abc import Iterator
class Node:
    """A binary-tree node holding an integer value."""

    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    """Sums every node value of a binary tree via depth-first search."""

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
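# Minimal usage sketch (hypothetical values): the iterator yields 10 + 5 + (-3) = 12.
_demo_tree = Node(10)
_demo_tree.left = Node(5)
_demo_tree.right = Node(-3)
assert next(iter(BinaryTreeNodeSum(_demo_tree))) == 12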
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 |
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Returns the maximum element of nums[left:right + 1] by divide and conquer."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
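# The recurrence T(n) = 2*T(n/2) + O(1) yields O(n) comparisons overall; quick check:
assert find_max([1, 5, 3, 9], 0, 3) == 9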
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 26 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_table_transformer": [
"TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TableTransformerConfig",
"TableTransformerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
"TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TableTransformerForObjectDetection",
"TableTransformerModel",
"TableTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
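# The _LazyModule pattern above defers the heavy torch import until an attribute is
# first accessed. A minimal standalone imitation of the idea (this is a sketch, not
# the transformers implementation):
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, item: str):
        # Import the owning submodule only when `item` is actually requested.
        for submodule, names in self._import_structure.items():
            if item in names:
                return getattr(importlib.import_module(submodule), item)
        raise AttributeError(item)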
| 26 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenizes and preprocesses the dataset; each prompt is yielded ``n_copies`` times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Removes the last block of the generated code, starting at the first EOF string."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generates multiple completions for each task in the dataset, using accelerate."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 26 | 1 |
def is_automorphic_number(number: int) -> bool:
    """An automorphic number ends in itself when squared, e.g. 5 -> 25 and 76 -> 5776."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
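# Worked examples: 5 -> 25, 6 -> 36, 25 -> 625 and 76 -> 5776 all end in themselves.
assert all(is_automorphic_number(n) for n in (0, 1, 5, 6, 25, 76))
assert not is_automorphic_number(7)  # 49 does not end in 7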
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 | 1 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
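# A two-line illustration of the inference rule exercised above (hypothetical data):
# the schema comes from the first record, later records are padded with None.
def _from_list_demo() -> list:
    return Dataset.from_list([{"col_1": 3}, {"col_1": 2}]).column_names  # -> ['col_1']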
| 26 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)

    return results
if __name__ == "__main__":
main()
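# The accuracy computed by compute_metrics above, replayed on toy logits:
# argmax over the class axis, then mean agreement with the labels.
def _accuracy_demo() -> dict:
    logits = np.array([[0.2, 0.8], [0.9, 0.1]])
    labels = np.array([1, 1])
    preds = np.argmax(logits, axis=1)
    return {"acc": (preds == labels).mean()}  # -> {'acc': 0.5}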
| 26 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase :int = logging.get_logger(__name__)
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["pixel_values"]
def __init__( self : int , a : bool = True , a : Optional[Dict[str, int]] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = True , a : Dict[str, int] = None , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : List[str] , ) ->None:
super().__init__(**a )
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_56}
SCREAMING_SNAKE_CASE__ : Any = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : List[Any] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size
SCREAMING_SNAKE_CASE__ : List[Any] = resample
SCREAMING_SNAKE_CASE__ : int = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[Any] = crop_size
SCREAMING_SNAKE_CASE__ : Any = do_rescale
SCREAMING_SNAKE_CASE__ : Any = rescale_factor
SCREAMING_SNAKE_CASE__ : int = do_normalize
SCREAMING_SNAKE_CASE__ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A_ ( self : Tuple , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BICUBIC , a : Optional[Union[str, ChannelDimension]] = None , **a : Optional[int] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(a , default_to_square=a )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = get_resize_output_image_size(a , size=size["shortest_edge"] , default_to_square=a )
return resize(a , size=a , resample=a , data_format=a , **a )
def A_ ( self : List[Any] , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : List[Any] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : Tuple = get_size_dict(a )
return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a )
def A_ ( self : Optional[int] , a : np.ndarray , a : float , a : Optional[Union[str, ChannelDimension]] = None , **a : Dict ) ->np.ndarray:
return rescale(a , scale=a , data_format=a , **a )
def A_ ( self : Union[str, Any] , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : Union[str, Any] , ) ->np.ndarray:
return normalize(a , mean=a , std=a , data_format=a , **a )
def A_ ( self : Tuple , a : ImageInput , a : Optional[bool] = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : Dict[str, int] = None , a : Optional[bool] = None , a : Optional[float] = None , a : Optional[bool] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[str, TensorType]] = None , a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **a : Any , ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : Tuple = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Union[str, Any] = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : List[str] = [to_numpy_array(a ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Tuple = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE__ : List[Any] = [self.center_crop(image=a , size=a ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : List[str] = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Dict = [self.normalize(image=a , mean=a , std=a ) for image in images]
SCREAMING_SNAKE_CASE__ : Dict = [to_channel_dimension_format(a , a ) for image in images]
SCREAMING_SNAKE_CASE__ : Optional[int] = {"pixel_values": images}
return BatchFeature(data=a , tensor_type=a )
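# The numeric effect of the rescale + normalize steps above, sketched with plain
# numpy and the class defaults (1/255 rescale factor, IMAGENET_STANDARD mean/std,
# which are all 0.5): mid-gray pixels map to the centre of the value range.
import numpy as np

_pixels = np.full((224, 224, 3), 127.5, dtype=np.float32)
_normalized = (_pixels * (1 / 255) - 0.5) / 0.5
print(_normalized.mean())  # ~0.0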
| 26 | 1 |
from __future__ import annotations
class Matrix:
    """An integer matrix with classic linear-algebra helpers."""

    def __init__(self, rows: list[list[int]]) -> None:
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)
def __repr__( self : Optional[Any] ) ->str:
return str(self.rows )
def __str__( self : str ) ->str:
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
"[" + ". ".join([str(a ) for value in row] ) + ".]"
for row in self.rows
] )
+ "]"
)
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError("Row must be equal in length to the other rows in the matrix")
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError("Column must be a list containing all ints and/or floats")
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError("Column must be equal in length to the other columns in the matrix")
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
return not self == other
def __neg__( self : List[str] ) ->Matrix:
return self * -1
    def __add__(self, other: Matrix) -> Matrix:
if self.order != other.order:
raise ValueError("Addition requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __sub__(self, other: Matrix) -> Matrix:
if self.order != other.order:
raise ValueError("Subtraction requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
raise ValueError(
"The number of columns in the first matrix must "
"be equal to the number of rows in the second" )
return Matrix(
[
                    [Matrix.dot_product(row, column) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
"A Matrix can only be multiplied by an int, float, or another matrix" )
    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
if not self.is_square:
raise ValueError("Only square matrices can be raised to a power" )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"Only invertable matrices can be raised to a negative power" )
        result = self
for _ in range(other - 1 ):
result *= self
return result
    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
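# The class computes determinants by cofactor expansion along the first row; the same
# rule, checked directly against numpy on a 3x3 example (independent of the class):
import numpy as np

_m = np.array([[2.0, 1.0, 0.0], [1.0, 3.0, 1.0], [0.0, 1.0, 2.0]])
_det = sum(
    (-1) ** j * _m[0][j] * np.linalg.det(np.delete(np.delete(_m, 0, axis=0), j, axis=1))
    for j in range(3)
)
assert abs(_det - np.linalg.det(_m)) < 1e-9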
| 26 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
def A_ ( self : List[Any] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__ : Optional[int] = controlnet_params
SCREAMING_SNAKE_CASE__ : Any = "Chef in the kitchen"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
SCREAMING_SNAKE_CASE__ : str = pipe.prepare_image_inputs([pose_image] * num_samples )
SCREAMING_SNAKE_CASE__ : Any = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : List[str] = jax.random.split(a , jax.device_count() )
SCREAMING_SNAKE_CASE__ : Optional[Any] = replicate(a )
SCREAMING_SNAKE_CASE__ : Tuple = shard(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shard(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : str = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 26 | 1 |
def longest_distance(graph):
    """Print the length (in vertices) of the longest path in a DAG given as an
    adjacency list, using topological-order relaxation (Kahn's algorithm)."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
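# The sample run above prints 5: the longest path in this DAG visits five vertices,
# e.g. 0 -> 2 -> 5 -> 6 -> 7 (distances are counted in vertices, starting at 1).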
| 26 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5; treat that as success.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 26 | 1 |
def rank_of_matrix(matrix):
    """Return the rank of a matrix (list of rows) via Gaussian elimination.
    The input matrix is modified in place."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
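    # Added usage sketch: the function mutates its argument while reducing it.
    print(rank_of_matrix([[1.0, 2.0], [2.0, 4.0]]))  # 1 (second row is a multiple of the first)
    print(rank_of_matrix([[1.0, 0.0], [0.0, 1.0]]))  # 2 (already full rank)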
| 26 |
def solution(n: int = 1000) -> int:
    """Return the largest product a*b*c for a Pythagorean triplet with a + b + c == n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(f"{solution() = }")
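    # Known result for the default n=1000 (Project Euler problem 9):
    # solution() == 31875000, the product of the triplet (200, 375, 425).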
| 26 | 1 |
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort sequence[start:end + 1] in place with the (deliberately slow) slowsort scheme."""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
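    # Added usage sketch: slowsort sorts in place.
    data = [4, 1, 3, 2]
    slowsort(data)
    print(data)  # [1, 2, 3, 4]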
| 26 | 1 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedy change-making: repeatedly take the largest denomination that still fits."""
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Take as many of this denomination as possible
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append to the "answers" array

    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
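# Example (non-interactive): find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
# returns [500, 100, 100, 100, 100, 50, 20, 10, 5, 2].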
| 26 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n_digits: int = 2) -> int:
    """Multiply the digit-cancelling fractions together and return the
    denominator of the product in lowest terms."""
    result = 1.0
    for fraction in fraction_list(n_digits):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
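    # solution() == 100: the four non-trivial two-digit digit-cancelling fractions
    # (16/64, 19/95, 26/65, 49/98) multiply to 1/100 in lowest terms.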
| 26 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
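# Added usage sketch (assumption: a vision-enabled environment with the checkpoint
# available; `PipelineTool` instances are callable):
#
#   from PIL import Image
#   tool = ImageCaptioningTool()
#   caption = tool(Image.open("photo.jpg"))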
| 26 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
| 26 | 1 |
import torch
def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0

    print(f"Successfully ran on {num_gpus} GPUs")
if __name__ == "__main__":
main()
| 26 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")
class AutoFeatureExtractorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)

            processor = AutoProcessor.from_pretrained(tmpdirname)

            self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_extractor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))

            processor = AutoProcessor.from_pretrained(tmpdirname)

            self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_feat_extr_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in tokenizer
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

            self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_tokenizer_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in feature extractor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

            self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_model_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)
            # copy relevant files
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            # create emtpy sample processor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write("{}")

            processor = AutoProcessor.from_pretrained(tmpdirname)

            self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_from_pretrained_dynamic_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )

        processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor", trust_remote_code=True)
        self.assertTrue(processor.special_attribute_present)
        self.assertEqual(processor.__class__.__name__, "NewProcessor")

        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True, use_fast=False
            )
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present)
            self.assertEqual(new_tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_new_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig, CustomProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoProcessor.register(Wav2Vec2Config, Wav2Vec2Processor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir, "vocab.txt")
                with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
                tokenizer = CustomTokenizer(vocab_file)

            processor = CustomProcessor(feature_extractor, tokenizer)

            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir)
                new_processor = AutoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_processor, CustomProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_processor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoProcessor.register(CustomConfig, NewProcessor)
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertTrue(processor.special_attribute_present)
            self.assertTrue(processor.feature_extractor.special_attribute_present)
            self.assertTrue(processor.tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_auto_processor_creates_tokenizer(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(processor.__class__.__name__, "BertTokenizerFast")

    def test_auto_processor_creates_image_processor(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext")
        self.assertEqual(processor.__class__.__name__, "ConvNextImageProcessor")
@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )

            new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_in_organization(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

            new_processor = Wav2Vec2Processor.from_pretrained("valid_org/test-processor-org")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()

        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        processor = CustomProcessor(feature_extractor, tokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)
            processor.save_pretrained(tmp_dir)

            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map,
                {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
            self.assertDictEqual(
                tokenizer_config["auto_map"],
                {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))

            repo.push_to_hub()

        new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
        # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
| 26 | 1 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """Move `module.<tensor_name>` to `device`, requantizing bitsandbytes
    `Int8Params`/`Params4bit` weights along the way when needed."""
    # recurse down to the owning submodule for dotted tensor names
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_4bit = False
        is_8bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value


def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    """Recursively swap `nn.Linear`/`Conv1D` children for bitsandbytes quantized layers."""
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced


def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model


def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)


def get_keys_to_not_convert(model):
    """Return module names (tied weights and output heads) that should stay in full precision."""
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
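# Added usage sketch (assumption: a CUDA environment with `bitsandbytes` installed;
# `BitsAndBytesConfig` comes from `transformers`):
#
#   from transformers import BitsAndBytesConfig
#   bnb_config = BitsAndBytesConfig(load_in_8bit=True)
#   model = replace_with_bnb_linear(model, quantization_config=bnb_config)
#   # every nn.Linear except `lm_head` is now a bnb.nn.Linear8bitLt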
| 26 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    """Bundles a CLIP image processor and a CLIP tokenizer into one processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
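# Added usage sketch (requires network access to download the checkpoint):
#
#   from PIL import Image
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"),
#                      return_tensors="pt", padding=True)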
| 26 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
"xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
"xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
"xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
"xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
"xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
"xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
"xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
"xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
"xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class XLMConfig(PretrainedConfig):
    """Configuration class for XLM models."""

    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)


class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
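# Added usage sketch: configurations are plain Python objects and can be built offline.
#
#   config = XLMConfig(n_layers=6, n_heads=8)
#   print(config.hidden_size)  # 2048 -- resolved to `emb_dim` via `attribute_map`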
| 26 |
import sys
from collections import defaultdict
class Heap:
    """Array-backed min-heap that also tracks each vertex's position in the heap."""

    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp1 = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp1

            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp)

            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    """Return the edge list of a minimum spanning tree (Prim's algorithm)."""
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
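    # Example session: for the triangle graph with edges "0 1 1", "1 2 1" and
    # "0 2 3", the printed MST edge list is [(0, 1), (1, 2)].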
| 26 | 1 |
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Divide-and-conquer maximum of nums[left:right + 1]."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]

    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
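    # Added usage sketch: search the whole list by passing its index bounds.
    print(find_max([3, 7, 1, 9, 5], 0, 4))  # 9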
| 26 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "longformer"
def __init__( self : List[str] , a : Union[List[int], int] = 5_12 , a : int = 2 , a : int = 1 , a : int = 0 , a : int = 2 , a : int = 3_05_22 , a : int = 7_68 , a : int = 12 , a : int = 12 , a : int = 30_72 , a : str = "gelu" , a : float = 0.1 , a : float = 0.1 , a : int = 5_12 , a : int = 2 , a : float = 0.02 , a : float = 1E-12 , a : bool = False , **a : Dict , ) ->Tuple:
super().__init__(pad_token_id=a , **a )
SCREAMING_SNAKE_CASE__ : int = attention_window
SCREAMING_SNAKE_CASE__ : Any = sep_token_id
SCREAMING_SNAKE_CASE__ : str = bos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = eos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : str = type_vocab_size
SCREAMING_SNAKE_CASE__ : Any = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Any = onnx_export
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : int , a : "PretrainedConfig" , a : str = "default" , a : "List[PatchingSpec]" = None ) ->str:
super().__init__(a , a , a )
SCREAMING_SNAKE_CASE__ : Any = True
@property
def A_ ( self : int ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE__ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
def A_ ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = super().outputs
if self.task == "default":
SCREAMING_SNAKE_CASE__ : List[str] = {0: "batch"}
return outputs
@property
def A_ ( self : str ) ->float:
return 1E-4
@property
def A_ ( self : Any ) ->int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def A_ ( self : str , a : "PreTrainedTokenizerBase" , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE__ : Tuple = super().generate_dummy_inputs(
preprocessor=a , batch_size=a , seq_length=a , is_pair=a , framework=a )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
SCREAMING_SNAKE_CASE__ : Any = torch.zeros_like(inputs["input_ids"] )
# make every second token global
SCREAMING_SNAKE_CASE__ : str = 1
return inputs
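# Usage sketch for the config above, assuming the obfuscated class corresponds
# to transformers' LongformerConfig (the "longformer" model_type suggests so);
# a minimal, hedged example rather than a canonical API reference.
from transformers import LongformerConfig

config = LongformerConfig(attention_window=512, max_position_embeddings=4098)
print(config.model_type)  # -> "longformer"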
| 26 | 1 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowercase :Optional[int] = logging.get_logger(__name__)
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : int , *a : Any , **a : Optional[int] ) ->None:
warnings.warn(
"The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use BeitImageProcessor instead." , a , )
super().__init__(*a , **a )
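# The deprecation pattern above in miniature (illustrative names; the warning
# category is an assumption, since the original's second argument is obfuscated
# to `a`): the old class keeps working but warns and defers to its replacement.
import warnings

class NewImageProcessor:
    def __init__(self, *args, **kwargs):
        pass

class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,  # assumed category, as in similar transformers shims
        )
        super().__init__(*args, **kwargs)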
| 26 |
def UpperCAmelCase ( _lowerCamelCase : int = 4_000_000 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = [0, 1]
SCREAMING_SNAKE_CASE__ : List[Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
for j in range(len(_lowerCamelCase ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"{solution() = }")
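# An equivalent two-variable formulation of the even-Fibonacci sum above
# (a sketch: it avoids materialising the whole sequence in a list).
def even_fib_sum(n: int = 4_000_000) -> int:
    a, b = 0, 1
    total = 0
    while b <= n:
        if b % 2 == 0:
            total += b
        a, b = b, a + b
    return total

assert even_fib_sum(10) == 10  # 2 + 8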
| 26 | 1 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__lowercase :Any = "http://www.mocksite.com/file1.txt"
__lowercase :Dict = "\"text\": [\"foo\", \"foo\"]"
__lowercase :Optional[Any] = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class _a :
"""simple docstring"""
snake_case_ = 2_00
snake_case_ = {"Content-Length": "100"}
snake_case_ = {}
def A_ ( self : str , **a : Union[str, Any] ) ->str:
return [bytes(a , "utf-8" )]
def UpperCAmelCase ( *_lowerCamelCase : Dict , **_lowerCamelCase : int ):
'''simple docstring'''
return MockResponse()
@pytest.mark.parametrize("urls_type" , [str, list, dict] )
def UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Any , _lowerCamelCase : List[str] ):
'''simple docstring'''
import requests
monkeypatch.setattr(_lowerCamelCase , "request" , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = URL
if issubclass(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = url
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Any = [url]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"train": url}
SCREAMING_SNAKE_CASE__ : Tuple = "dummy"
SCREAMING_SNAKE_CASE__ : Optional[Any] = "downloads"
SCREAMING_SNAKE_CASE__ : List[Any] = tmp_path
SCREAMING_SNAKE_CASE__ : Optional[int] = DownloadConfig(
cache_dir=os.path.join(_lowerCamelCase , _lowerCamelCase ) , use_etag=_lowerCamelCase , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = dl_manager.download(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : str = [downloaded_paths]
SCREAMING_SNAKE_CASE__ : List[Any] = [urls]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in downloaded_paths.keys()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = downloaded_paths.values()
SCREAMING_SNAKE_CASE__ : Optional[int] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(_lowerCamelCase , _lowerCamelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
SCREAMING_SNAKE_CASE__ : Dict = Path(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
SCREAMING_SNAKE_CASE__ : Optional[int] = downloaded_path.read_text()
assert content == CONTENT
SCREAMING_SNAKE_CASE__ : Union[str, Any] = downloaded_path.with_suffix(".json" )
assert metadata_downloaded_path.exists()
SCREAMING_SNAKE_CASE__ : Optional[Any] = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type" , [str, list, dict] )
def UpperCAmelCase ( _lowerCamelCase : Any , _lowerCamelCase : int , _lowerCamelCase : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = str(_lowerCamelCase )
if issubclass(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = filename
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : int = [filename]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : List[str] = {"train": filename}
SCREAMING_SNAKE_CASE__ : Tuple = "dummy"
SCREAMING_SNAKE_CASE__ : int = xz_file.parent
SCREAMING_SNAKE_CASE__ : List[Any] = "extracted"
SCREAMING_SNAKE_CASE__ : Dict = DownloadConfig(
cache_dir=_lowerCamelCase , use_etag=_lowerCamelCase , )
SCREAMING_SNAKE_CASE__ : Tuple = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = dl_manager.extract(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Any = [extracted_paths]
SCREAMING_SNAKE_CASE__ : Optional[Any] = [paths]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in extracted_paths.keys()
SCREAMING_SNAKE_CASE__ : str = extracted_paths.values()
SCREAMING_SNAKE_CASE__ : Any = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(_lowerCamelCase , _lowerCamelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
SCREAMING_SNAKE_CASE__ : Any = Path(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = extracted_path.parts
assert parts[-1] == hash_url_to_filename(_lowerCamelCase , etag=_lowerCamelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
SCREAMING_SNAKE_CASE__ : int = extracted_path.read_text()
SCREAMING_SNAKE_CASE__ : str = text_file.read_text()
assert extracted_file_content == expected_file_content
def UpperCAmelCase ( _lowerCamelCase : Any , _lowerCamelCase : List[Any] ):
'''simple docstring'''
assert path.endswith(".jsonl" )
for num_items, line in enumerate(_lowerCamelCase , start=1 ):
SCREAMING_SNAKE_CASE__ : List[str] = json.loads(line.decode("utf-8" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("archive_jsonl" , ["tar_jsonl_path", "zip_jsonl_path"] )
def UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = request.getfixturevalue(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl" , ["tar_nested_jsonl_path", "zip_nested_jsonl_path"] )
def UpperCAmelCase ( _lowerCamelCase : Optional[int] , _lowerCamelCase : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = request.getfixturevalue(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_tar == 1
assert num_jsonl == 2
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(_lowerCamelCase ) , start=1 ):
assert os.path.basename(_lowerCamelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
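# A minimal sketch of the cache-naming helper these tests rely on: the cached
# filename is a deterministic hash of (url, etag), so repeated downloads of
# the same URL resolve to a single cache entry.
from datasets.utils.file_utils import hash_url_to_filename

name_a = hash_url_to_filename("http://www.mocksite.com/file1.txt", etag=None)
name_b = hash_url_to_filename("http://www.mocksite.com/file1.txt", etag=None)
assert name_a == name_b  # same inputs, same cache filename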
| 26 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , a : Any , a : bool = True , a : Dict[str, int] = None , a : int = 32 , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : bool = True , a : Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073] , a : Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711] , a : bool = True , a : Any=7 , a : str=30 , a : Dict=4_00 , a : Optional[int]=3 , ) ->int:
SCREAMING_SNAKE_CASE__ : int = parent
SCREAMING_SNAKE_CASE__ : Dict = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_88}
SCREAMING_SNAKE_CASE__ : List[Any] = size_divisor
SCREAMING_SNAKE_CASE__ : List[Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Tuple = rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean
SCREAMING_SNAKE_CASE__ : Dict = image_std
SCREAMING_SNAKE_CASE__ : List[str] = do_pad
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : int = num_channels
SCREAMING_SNAKE_CASE__ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_resolution
def A_ ( self : List[str] ) ->Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def A_ ( self : int , a : Optional[int] , a : Union[str, Any]=False ) ->Optional[Any]:
if not batched:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE__ : Dict = image_inputs[0]
if isinstance(a , Image.Image ):
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = image.size
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = image.shape[1], image.shape[2]
SCREAMING_SNAKE_CASE__ : Any = size / min(a , a )
if h < w:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = size, scale * w
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = scale * h, size
SCREAMING_SNAKE_CASE__ : List[Any] = int((13_33 / 8_00) * size )
if max(a , a ) > max_size:
SCREAMING_SNAKE_CASE__ : List[Any] = max_size / max(a , a )
SCREAMING_SNAKE_CASE__ : int = newh * scale
SCREAMING_SNAKE_CASE__ : Optional[int] = neww * scale
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = int(newh + 0.5 ), int(neww + 0.5 )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
SCREAMING_SNAKE_CASE__ : List[Any] = []
for image in image_inputs:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[0] )[0]
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = BridgeTowerImageProcessor if is_vision_available() else None
def A_ ( self : List[Any] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Any = BridgeTowerImageProcessingTester(self )
@property
def A_ ( self : Optional[int] ) ->Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self : Tuple ) ->str:
SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "image_mean" ) )
self.assertTrue(hasattr(a , "image_std" ) )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "do_resize" ) )
self.assertTrue(hasattr(a , "size" ) )
self.assertTrue(hasattr(a , "size_divisor" ) )
def A_ ( self : List[Any] ) ->List[Any]:
pass
def A_ ( self : Tuple ) ->Optional[Any]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : int = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : Optional[int] ) ->Any:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : str ) ->Optional[int]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Any = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
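# Standalone sketch of the shortest-edge resize rule exercised above: scale so
# the shorter side reaches `size`, cap the longer side at (1333 / 800) * size,
# then snap both sides down to the size divisor (illustrative names).
def expected_shape(h: int, w: int, size: int = 288, divisor: int = 32) -> tuple[int, int]:
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    max_size = int((1333 / 800) * size)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    return newh // divisor * divisor, neww // divisor * divisor

print(expected_shape(400, 800))  # e.g. (224, 448)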
| 26 | 1 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class _a :
"""simple docstring"""
def __init__( self : List[str] , a : Collection[float] | None = None ) ->None:
if components is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
SCREAMING_SNAKE_CASE__ : Dict = list(a )
def __len__( self : Dict ) ->int:
return len(self.__components )
def __str__( self : Union[str, Any] ) ->str:
return "(" + ",".join(map(a , self.__components ) ) + ")"
def __add__( self : Optional[Any] , a : Vector ) ->Vector:
SCREAMING_SNAKE_CASE__ : Any = len(self )
if size == len(a ):
SCREAMING_SNAKE_CASE__ : Tuple = [self.__components[i] + other.component(a ) for i in range(a )]
return Vector(a )
else:
raise Exception("must have the same size" )
def __sub__( self : Any , a : Vector ) ->Vector:
SCREAMING_SNAKE_CASE__ : Dict = len(self )
if size == len(a ):
SCREAMING_SNAKE_CASE__ : List[str] = [self.__components[i] - other.component(a ) for i in range(a )]
return Vector(a )
else: # error case
raise Exception("must have the same size" )
@overload
def __mul__( self : Any , a : float ) ->Vector:
...
@overload
def __mul__( self : Optional[int] , a : Vector ) ->float:
...
def __mul__( self : int , a : float | Vector ) ->float | Vector:
if isinstance(a , (float, int) ):
SCREAMING_SNAKE_CASE__ : List[str] = [c * other for c in self.__components]
return Vector(a )
elif isinstance(a , a ) and len(self ) == len(a ):
SCREAMING_SNAKE_CASE__ : Any = len(self )
SCREAMING_SNAKE_CASE__ : Any = [self.__components[i] * other.component(a ) for i in range(a )]
return sum(a )
else: # error case
raise Exception("invalid operand!" )
def A_ ( self : Optional[int] ) ->Vector:
return Vector(self.__components )
def A_ ( self : Dict , a : int ) ->float:
if isinstance(a , a ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("index out of range" )
def A_ ( self : Union[str, Any] , a : int , a : float ) ->None:
assert -len(self.__components ) <= pos < len(self.__components )
SCREAMING_SNAKE_CASE__ : Any = value
def A_ ( self : str ) ->float:
if len(self.__components ) == 0:
raise Exception("Vector is empty" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [c**2 for c in self.__components]
return math.sqrt(sum(a ) )
def A_ ( self : Dict , a : Vector , a : bool = False ) ->float:
SCREAMING_SNAKE_CASE__ : Dict = self * other
SCREAMING_SNAKE_CASE__ : Any = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
assert isinstance(_lowerCamelCase , _lowerCamelCase )
return Vector([0] * dimension )
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
assert isinstance(_lowerCamelCase , _lowerCamelCase ) and (isinstance(_lowerCamelCase , _lowerCamelCase ))
SCREAMING_SNAKE_CASE__ : str = [0] * dimension
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1
return Vector(_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : float , _lowerCamelCase : Vector , _lowerCamelCase : Vector ):
'''simple docstring'''
assert (
isinstance(_lowerCamelCase , _lowerCamelCase )
and isinstance(_lowerCamelCase , _lowerCamelCase )
and (isinstance(_lowerCamelCase , (int, float) ))
)
return x * scalar + y
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
random.seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [random.randint(_lowerCamelCase , _lowerCamelCase ) for _ in range(_lowerCamelCase )]
return Vector(_lowerCamelCase )
class _a :
"""simple docstring"""
def __init__( self : Optional[Any] , a : list[list[float]] , a : int , a : int ) ->None:
SCREAMING_SNAKE_CASE__ : Optional[int] = matrix
SCREAMING_SNAKE_CASE__ : Dict = w
SCREAMING_SNAKE_CASE__ : int = h
def __str__( self : int ) ->str:
SCREAMING_SNAKE_CASE__ : Optional[int] = ""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : Optional[Any] , a : Matrix ) ->Matrix:
if self.__width == other.width() and self.__height == other.height():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
for i in range(self.__height ):
SCREAMING_SNAKE_CASE__ : Any = [
self.__matrix[i][j] + other.component(a , a )
for j in range(self.__width )
]
matrix.append(a )
return Matrix(a , self.__width , self.__height )
else:
raise Exception("matrix must have the same dimension!" )
def __sub__( self : int , a : Matrix ) ->Matrix:
if self.__width == other.width() and self.__height == other.height():
SCREAMING_SNAKE_CASE__ : Tuple = []
for i in range(self.__height ):
SCREAMING_SNAKE_CASE__ : List[Any] = [
self.__matrix[i][j] - other.component(a , a )
for j in range(self.__width )
]
matrix.append(a )
return Matrix(a , self.__width , self.__height )
else:
raise Exception("matrices must have the same dimension!" )
@overload
def __mul__( self : Tuple , a : float ) ->Matrix:
...
@overload
def __mul__( self : int , a : Vector ) ->Vector:
...
def __mul__( self : Dict , a : float | Vector ) ->Vector | Matrix:
if isinstance(a , a ): # matrix-vector
if len(a ) == self.__width:
SCREAMING_SNAKE_CASE__ : Tuple = zero_vector(self.__height )
for i in range(self.__height ):
SCREAMING_SNAKE_CASE__ : Optional[int] = [
self.__matrix[i][j] * other.component(a )
for j in range(self.__width )
]
ans.change_component(a , sum(a ) )
return ans
else:
raise Exception(
"vector must have the same size as the "
"number of columns of the matrix!" )
elif isinstance(a , (int, float) ): # matrix-scalar
SCREAMING_SNAKE_CASE__ : List[str] = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(a , self.__width , self.__height )
return None
def A_ ( self : List[str] ) ->int:
return self.__height
def A_ ( self : int ) ->int:
return self.__width
def A_ ( self : str , a : int , a : int ) ->float:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("change_component: indices out of bounds" )
def A_ ( self : Any , a : int , a : int , a : float ) ->None:
if 0 <= x < self.__height and 0 <= y < self.__width:
SCREAMING_SNAKE_CASE__ : Dict = value
else:
raise Exception("change_component: indices out of bounds" )
def A_ ( self : Optional[Any] , a : int , a : int ) ->float:
if self.__height != self.__width:
raise Exception("Matrix is not square" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(a ) ):
SCREAMING_SNAKE_CASE__ : Any = minor[i][:y] + minor[i][y + 1 :]
return Matrix(a , self.__width - 1 , self.__height - 1 ).determinant()
def A_ ( self : Dict , a : int , a : int ) ->float:
if self.__height != self.__width:
raise Exception("Matrix is not square" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(a , a )
else:
raise Exception("Indices out of bounds" )
def A_ ( self : List[str] ) ->float:
if self.__height != self.__width:
raise Exception("Matrix is not square" )
if self.__height < 1:
raise Exception("Matrix has no element" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
SCREAMING_SNAKE_CASE__ : Dict = [
self.__matrix[0][y] * self.cofactor(0 , a ) for y in range(self.__width )
]
return sum(a )
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : list[list[float]] = [[0] * n for _ in range(_lowerCamelCase )]
return Matrix(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
random.seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : list[list[float]] = [
[random.randint(_lowerCamelCase , _lowerCamelCase ) for _ in range(_lowerCamelCase )] for _ in range(_lowerCamelCase )
]
return Matrix(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
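# A quick numeric check of the angle formula used by the vector class above:
# cos(theta) = (x . y) / (|x| * |y|), here on plain floats (sketch).
import math

x, y = [1.0, 0.0], [1.0, 1.0]
dot = sum(a * b for a, b in zip(x, y))
length = lambda v: math.sqrt(sum(c * c for c in v))
theta = math.degrees(math.acos(dot / (length(x) * length(y))))
assert abs(theta - 45.0) < 1e-9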
| 26 |
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
SCREAMING_SNAKE_CASE__ : List[str] = [
2_047,
1_373_653,
25_326_001,
3_215_031_751,
2_152_302_898_747,
3_474_749_660_383,
341_550_071_728_321,
1,
3_825_123_056_546_413_051,
1,
1,
318_665_857_834_031_151_167_461,
3_317_044_064_679_887_385_961_981,
]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(_lowerCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
SCREAMING_SNAKE_CASE__ : Dict = primes[:idx]
break
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
SCREAMING_SNAKE_CASE__ : str = False
for r in range(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = pow(_lowerCamelCase , d * 2**r , _lowerCamelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
SCREAMING_SNAKE_CASE__ : str = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def UpperCAmelCase ( ):
'''simple docstring'''
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838_201 )
assert miller_rabin(838_207 )
# 1_373_653
assert not miller_rabin(17_316_001 )
assert miller_rabin(17_316_017 )
# 25_326_001
assert not miller_rabin(3_078_386_641 )
assert miller_rabin(3_078_386_653 )
# 3_215_031_751
assert not miller_rabin(1_713_045_574_801 )
assert miller_rabin(1_713_045_574_819 )
# 2_152_302_898_747
assert not miller_rabin(2_779_799_728_307 )
assert miller_rabin(2_779_799_728_327 )
# 3_474_749_660_383
assert not miller_rabin(113_850_023_909_441 )
assert miller_rabin(113_850_023_909_527 )
# 341_550_071_728_321
assert not miller_rabin(1_275_041_018_848_804_351 )
assert miller_rabin(1_275_041_018_848_804_391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79_666_464_458_507_787_791_867 )
assert miller_rabin(79_666_464_458_507_787_791_951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552_840_677_446_647_897_660_333 )
assert miller_rabin(552_840_677_446_647_897_660_359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
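# The decomposition at the heart of the test above, in isolation: write
# n - 1 = d * 2**s with d odd; the witness loop then checks a**(d * 2**r) % n.
def decompose(n: int) -> tuple[int, int]:
    d, s = n - 1, 0
    while d % 2 == 0:
        d //= 2
        s += 1
    return d, s

assert decompose(561) == (35, 4)  # 560 == 35 * 2**4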
| 26 | 1 |
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__lowercase :List[Any] = logging.get_logger(__name__)
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["input_features", "attention_mask"]
def __init__( self : Optional[int] , a : Tuple=80 , a : Union[str, Any]=1_60_00 , a : Optional[Any]=80 , a : Union[str, Any]=0.0 , a : Optional[Any]=True , a : int=True , a : int=True , **a : Optional[Any] , ) ->Tuple:
super().__init__(feature_size=a , sampling_rate=a , padding_value=a , **a )
SCREAMING_SNAKE_CASE__ : List[Any] = num_mel_bins
SCREAMING_SNAKE_CASE__ : Dict = do_ceptral_normalize
SCREAMING_SNAKE_CASE__ : List[str] = normalize_means
SCREAMING_SNAKE_CASE__ : List[str] = normalize_vars
SCREAMING_SNAKE_CASE__ : Any = True
def A_ ( self : Optional[Any] , a : np.ndarray , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : Optional[int] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
SCREAMING_SNAKE_CASE__ : List[Any] = torch.from_numpy(a ).unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : int = ta_kaldi.fbank(a , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def A_ ( a : np.ndarray , a : int , a : Optional[bool] = True , a : Optional[bool] = True , a : float = 0.0 , ) ->np.ndarray:
# make sure we normalize float32 arrays
if normalize_means:
SCREAMING_SNAKE_CASE__ : List[str] = x[:input_length].mean(axis=0 )
SCREAMING_SNAKE_CASE__ : Tuple = np.subtract(a , a )
if normalize_vars:
SCREAMING_SNAKE_CASE__ : Dict = x[:input_length].std(axis=0 )
SCREAMING_SNAKE_CASE__ : List[str] = np.divide(a , a )
if input_length < x.shape[0]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = padding_value
# make sure array is in float32
SCREAMING_SNAKE_CASE__ : Optional[Any] = x.astype(np.floataa )
return x
def A_ ( self : List[str] , a : List[np.ndarray] , a : Optional[np.ndarray] = None ) ->List[np.ndarray]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(a , a , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(a , a )
]
def __call__( self : Optional[int] , a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a : Union[bool, str, PaddingStrategy] = False , a : Optional[int] = None , a : bool = False , a : Optional[int] = None , a : Optional[Union[str, TensorType]] = None , a : Optional[int] = None , a : Optional[bool] = None , **a : List[Any] , ) ->BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
SCREAMING_SNAKE_CASE__ : List[str] = isinstance(a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
SCREAMING_SNAKE_CASE__ : Any = is_batched_numpy or (
isinstance(a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE__ : Any = [np.asarray(a , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(a , np.ndarray ):
SCREAMING_SNAKE_CASE__ : Dict = np.asarray(a , dtype=np.floataa )
elif isinstance(a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE__ : List[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE__ : Any = [raw_speech]
# extract fbank features
SCREAMING_SNAKE_CASE__ : List[Any] = [self._extract_fbank_features(a ) for waveform in raw_speech]
# convert into correct format for padding
SCREAMING_SNAKE_CASE__ : Union[str, Any] = BatchFeature({"input_features": features} )
SCREAMING_SNAKE_CASE__ : Any = self.pad(
a , padding=a , max_length=a , truncation=a , pad_to_multiple_of=a , return_attention_mask=a , **a , )
# make sure list is in array format
SCREAMING_SNAKE_CASE__ : List[Any] = padded_inputs.get("input_features" )
if isinstance(input_features[0] , a ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [np.asarray(a , dtype=np.floataa ) for feature in input_features]
SCREAMING_SNAKE_CASE__ : List[Any] = padded_inputs.get("attention_mask" )
if attention_mask is not None:
SCREAMING_SNAKE_CASE__ : List[Any] = [np.asarray(a , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
np.array(a , dtype=np.intaa )
if self._get_padding_strategies(a , max_length=a ) is not PaddingStrategy.DO_NOT_PAD
else None
)
SCREAMING_SNAKE_CASE__ : Any = self.normalize(
padded_inputs["input_features"] , attention_mask=a )
if return_tensors is not None:
SCREAMING_SNAKE_CASE__ : List[str] = padded_inputs.convert_to_tensors(a )
return padded_inputs
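# Utterance-level CMVN as applied above, in isolation (a numpy sketch):
# subtract the per-feature mean and divide by the per-feature std computed
# over the valid (unpadded) frames only.
import numpy as np

def cmvn(x: np.ndarray, input_length: int) -> np.ndarray:
    mean = x[:input_length].mean(axis=0)
    std = x[:input_length].std(axis=0)
    return ((x - mean) / std).astype(np.float32)

feats = cmvn(np.random.rand(100, 80).astype(np.float32), input_length=100)
assert abs(float(feats.mean())) < 1e-3  # roughly zero-mean after CMVN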
| 26 |
import numpy
class _a :
"""simple docstring"""
def __init__( self : Optional[int] , a : numpy.ndarray , a : numpy.ndarray ) ->None:
SCREAMING_SNAKE_CASE__ : Any = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
SCREAMING_SNAKE_CASE__ : int = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
SCREAMING_SNAKE_CASE__ : Dict = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
SCREAMING_SNAKE_CASE__ : List[Any] = numpy.random.rand(3 , 1 )
# Real output values provided.
SCREAMING_SNAKE_CASE__ : str = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
SCREAMING_SNAKE_CASE__ : Tuple = numpy.zeros(output_array.shape )
def A_ ( self : Union[str, Any] ) ->numpy.ndarray:
SCREAMING_SNAKE_CASE__ : List[Any] = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
SCREAMING_SNAKE_CASE__ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
SCREAMING_SNAKE_CASE__ : int = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def A_ ( self : int ) ->None:
SCREAMING_SNAKE_CASE__ : Optional[int] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
SCREAMING_SNAKE_CASE__ : int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def A_ ( self : int , a : numpy.ndarray , a : int , a : bool ) ->None:
for iteration in range(1 , iterations + 1 ):
SCREAMING_SNAKE_CASE__ : Dict = self.feedforward()
self.back_propagation()
if give_loss:
SCREAMING_SNAKE_CASE__ : int = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f"""Iteration {iteration} Loss: {loss}""" )
def A_ ( self : Tuple , a : numpy.ndarray ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[int] = input_arr
SCREAMING_SNAKE_CASE__ : Dict = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
SCREAMING_SNAKE_CASE__ : Any = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def UpperCAmelCase ( _lowerCamelCase : numpy.ndarray ):
'''simple docstring'''
return 1 / (1 + numpy.exp(-value ))
def UpperCAmelCase ( _lowerCamelCase : numpy.ndarray ):
'''simple docstring'''
return (value) * (1 - (value))
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
SCREAMING_SNAKE_CASE__ : Any = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
SCREAMING_SNAKE_CASE__ : List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=_lowerCamelCase , output_array=_lowerCamelCase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_lowerCamelCase , iterations=10 , give_loss=_lowerCamelCase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
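# Numeric sanity check of the activation pair above: sigmoid'(x) == s * (1 - s)
# with s = sigmoid(x), which is why the backward pass applies the derivative
# to the stored activations rather than to the raw inputs.
import numpy

x = numpy.linspace(-3, 3, 7)
s = 1 / (1 + numpy.exp(-x))
numeric = (1 / (1 + numpy.exp(-(x + 1e-6))) - s) / 1e-6  # forward difference
assert numpy.allclose(numeric, s * (1 - s), atol=1e-5)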
| 26 | 1 |
from sklearn.metrics import recall_score
import datasets
__lowercase :List[Any] = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
__lowercase :List[str] = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
__lowercase :Union[str, Any] = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def A_ ( self : Dict ) ->Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
def A_ ( self : Any , a : str , a : Union[str, Any] , a : str=None , a : Union[str, Any]=1 , a : str="binary" , a : List[str]=None , a : Optional[Any]="warn" , ) ->Any:
SCREAMING_SNAKE_CASE__ : List[Any] = recall_score(
a , a , labels=a , pos_label=a , average=a , sample_weight=a , zero_division=a , )
return {"recall": float(a ) if score.size == 1 else score}
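# Recall computed by hand for the metric above: TP / (TP + FN) on the positive
# class, reproducing docstring Example 1 (binary average, pos_label=1).
refs = [0, 0, 1, 1, 1]
preds = [0, 1, 0, 1, 1]
tp = sum(p == r == 1 for p, r in zip(preds, refs))
fn = sum(r == 1 and p == 0 for p, r in zip(preds, refs))
assert abs(tp / (tp + fn) - 0.6666666666666666) < 1e-12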
| 26 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowercase :Tuple = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__lowercase :str = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__lowercase :List[Any] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n    predictions (list of str): list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references (list of list of str): list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n    'google_bleu': google_bleu score\n\nExamples:\n    Example 1:\n        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n        ...         'interested', 'in', 'world', 'history']\n        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n        ...          'because', 'he', 'read', 'the', 'book']\n\n        >>> list_of_references = [[ref1a], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.44\n\n    Example 2:\n        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n        ...          'heed', 'the', 'cat', 'commands']\n        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n        ...          'of', 'the', 'cat']\n\n        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n        ...         'interested', 'in', 'world', 'history']\n        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n        ...          'because', 'he', 'read', 'the', 'book']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.61\n\n    Example 3:\n        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n        ...          'heed', 'the', 'cat', 'commands']\n        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n        ...          'of', 'the', 'cat']\n\n        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n        ...         'interested', 'in', 'world', 'history']\n        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n        ...          'because', 'he', 'read', 'the', 'book']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.53\n\n    Example 4:\n        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n        ...          'heed', 'the', 'cat', 'commands']\n        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n        ...          'of', 'the', 'cat']\n\n        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n        ...         'interested', 'in', 'world', 'history']\n        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n        ...          'because', 'he', 'read', 'the', 'book']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def A_ ( self : List[Any] ) ->MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def A_ ( self : str , a : List[List[List[str]]] , a : List[List[str]] , a : int = 1 , a : int = 4 , ) ->Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=a , hypotheses=a , min_len=a , max_len=a )
}
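# Hedged sketch of the underlying NLTK call: the metric above delegates to
# corpus_gleu; sentence_gleu is its single-pair sibling with the same
# min_len/max_len n-gram bounds.
from nltk.translate import gleu_score

hyp = ["the", "cat", "sat", "on", "the", "mat"]
ref = ["the", "cat", "is", "on", "the", "mat"]
print(round(gleu_score.sentence_gleu([ref], hyp, min_len=1, max_len=4), 2))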
| 26 | 1 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__lowercase :Tuple = 16
__lowercase :Dict = 32
def UpperCAmelCase ( _lowerCamelCase : Accelerator , _lowerCamelCase : int = 16 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained("bert-base-cased" )
SCREAMING_SNAKE_CASE__ : Tuple = load_dataset("glue" , "mrpc" )
def tokenize_function(_lowerCamelCase : str ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_lowerCamelCase , max_length=_lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE__ : Dict = datasets.map(
_lowerCamelCase , batched=_lowerCamelCase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE__ : List[Any] = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_lowerCamelCase : Optional[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE__ : str = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE__ : Tuple = 16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE__ : str = 8
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
return tokenizer.pad(
_lowerCamelCase , padding="longest" , max_length=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_tensors="pt" , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE__ : List[str] = DataLoader(
tokenized_datasets["train"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = DataLoader(
tokenized_datasets["validation"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__lowercase :List[Any] = mocked_dataloaders # noqa: F811
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : Any ):
'''simple docstring'''
if os.environ.get("TESTING_MOCKED_DATALOADERS" , _lowerCamelCase ) == "1":
SCREAMING_SNAKE_CASE__ : Tuple = 2
# Initialize accelerator
SCREAMING_SNAKE_CASE__ : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE__ : Dict = config["lr"]
SCREAMING_SNAKE_CASE__ : str = int(config["num_epochs"] )
SCREAMING_SNAKE_CASE__ : int = int(config["seed"] )
SCREAMING_SNAKE_CASE__ : List[Any] = int(config["batch_size"] )
SCREAMING_SNAKE_CASE__ : Any = evaluate.load("glue" , "mrpc" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=_lowerCamelCase )
def inner_training_loop(_lowerCamelCase : Any ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(_lowerCamelCase )
        # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE__ : Optional[int] = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE__ : Optional[Any] = AdamW(params=model.parameters() , lr=_lowerCamelCase )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_dataloaders(_lowerCamelCase , _lowerCamelCase )
# Instantiate scheduler
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=_lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(_lowerCamelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = accelerator.prepare(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Now we train the model
for epoch in range(_lowerCamelCase ):
model.train()
for step, batch in enumerate(_lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = outputs.loss
accelerator.backward(_lowerCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=_lowerCamelCase , references=_lowerCamelCase , )
SCREAMING_SNAKE_CASE__ : List[str] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , _lowerCamelCase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=_lowerCamelCase , default=_lowerCamelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
SCREAMING_SNAKE_CASE__ : str = parser.parse_args()
SCREAMING_SNAKE_CASE__ : Optional[int] = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(_lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
main()
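# New Code # (illustrative sketch, appended for documentation purposes)
# A self-contained demonstration of the retry behaviour that
# `find_executable_batch_size` provides: when the decorated function raises an
# out-of-memory error, it is re-invoked with the batch size halved. The
# simulated memory ceiling below is hypothetical and stands in for the real
# forward/backward pass in `inner_training_loop`.
def _demo_auto_batch_size(max_fitting_batch_size=20):
    @find_executable_batch_size(starting_batch_size=64)
    def run_step(batch_size):
        if batch_size > max_fitting_batch_size:
            # mimics the message raised by a failed CUDA allocation
            raise RuntimeError("CUDA out of memory.")
        return batch_size
    return run_step()  # called with no arguments, exactly like inner_training_loop above
# _demo_auto_batch_size() returns 16: 64 and 32 both "fail", 16 fits.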
| 26 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__lowercase :List[Any] = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any]=None ):
'''simple docstring'''
require_version(deps[pkg] , _lowerCamelCase )
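# Illustrative usage sketch (not part of the original check above); the
# requirement string is an example value, not one read from
# dependency_versions_table.
def _demo_require_version():
    # raises if tqdm is missing or its installed version fails the check
    require_version("tqdm>=4.27", "Try: pip install tqdm --upgrade")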
| 26 | 1 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Dict ) ->List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def A_ ( self : Dict ) ->Tuple:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__ : List[Any] = controlnet_params
SCREAMING_SNAKE_CASE__ : Dict = "bird"
SCREAMING_SNAKE_CASE__ : List[Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe.prepare_image_inputs([canny_image] * num_samples )
SCREAMING_SNAKE_CASE__ : List[Any] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : int = jax.random.split(a , jax.device_count() )
SCREAMING_SNAKE_CASE__ : List[Any] = replicate(a )
SCREAMING_SNAKE_CASE__ : List[str] = shard(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shard(a )
SCREAMING_SNAKE_CASE__ : Dict = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : List[Any] = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def A_ ( self : List[Any] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__ : Optional[int] = controlnet_params
SCREAMING_SNAKE_CASE__ : Any = "Chef in the kitchen"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
SCREAMING_SNAKE_CASE__ : str = pipe.prepare_image_inputs([pose_image] * num_samples )
SCREAMING_SNAKE_CASE__ : Any = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : List[str] = jax.random.split(a , jax.device_count() )
SCREAMING_SNAKE_CASE__ : Optional[Any] = replicate(a )
SCREAMING_SNAKE_CASE__ : Tuple = shard(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shard(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : str = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
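# Illustrative sketch (new code, not part of the tests above): `replicate`
# copies pytree leaves onto every local device, while `shard` splits the
# leading batch axis across devices. Shapes are toy demo values; this assumes
# flax/jax are available, as in the guarded imports at the top of the file.
def _demo_replicate_and_shard():
    num_devices = jax.device_count()
    params = {"w": jnp.ones((3,))}
    batch = jnp.zeros((num_devices * 2, 4))  # leading dim must divide evenly across devices
    replicated = replicate(params)  # every leaf gains a leading device axis
    sharded = shard(batch)  # reshaped to (num_devices, 2, 4)
    assert sharded.shape == (num_devices, 2, 4)
    return replicated, sharded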
| 26 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : list[int] , _lowerCamelCase : int ):
'''simple docstring'''
if len(_lowerCamelCase ) < k or k < 0:
raise ValueError("Invalid Input" )
SCREAMING_SNAKE_CASE__ : int = sum(array[:k] )
for i in range(len(_lowerCamelCase ) - k ):
SCREAMING_SNAKE_CASE__ : str = current_sum - array[i] + array[i + k]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max(_lowerCamelCase , _lowerCamelCase )
return max_sum
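# Illustrative reference (new code): the same sliding-window idea written out
# step by step, useful as a cross-check for the function above.
def max_sum_in_array_reference(array: list[int], k: int) -> int:
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    window = sum(array[:k])
    best = window
    for i in range(len(array) - k):
        window += array[i + k] - array[i]  # slide the window one step to the right
        best = max(best, window)
    return best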
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
__lowercase :List[str] = [randint(-1_000, 1_000) for i in range(100)]
__lowercase :Any = randint(0, 110)
print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}")
| 26 | 1 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"kwargs, expected" , [
({"num_shards": 0, "max_num_jobs": 1}, []),
({"num_shards": 10, "max_num_jobs": 1}, [range(10 )]),
({"num_shards": 10, "max_num_jobs": 10}, [range(_lowerCamelCase , i + 1 ) for i in range(10 )]),
({"num_shards": 1, "max_num_jobs": 10}, [range(1 )]),
({"num_shards": 10, "max_num_jobs": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"num_shards": 3, "max_num_jobs": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def UpperCAmelCase ( _lowerCamelCase : Any , _lowerCamelCase : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = _distribute_shards(**_lowerCamelCase )
assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, max_num_jobs, expected" , [
({"foo": 0}, 10, [{"foo": 0}]),
({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
] , )
def UpperCAmelCase ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = _split_gen_kwargs(_lowerCamelCase , _lowerCamelCase )
assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, expected" , [
({"foo": 0}, 1),
({"shards": [0]}, 1),
({"shards": [0, 1, 2, 3]}, 4),
({"shards": [0, 1, 2, 3], "foo": 0}, 4),
({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
] , )
def UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : Dict ):
'''simple docstring'''
if expected is RuntimeError:
with pytest.raises(_lowerCamelCase ):
_number_of_shards_in_gen_kwargs(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__ : Dict = _number_of_shards_in_gen_kwargs(_lowerCamelCase )
assert out == expected
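# Illustrative sketch (new code, not the library implementation): one simple
# way to split `num_shards` into at most `max_num_jobs` contiguous ranges,
# matching the parametrized expectations above.
def _demo_distribute_shards(num_shards, max_num_jobs):
    num_jobs = min(num_shards, max_num_jobs)
    sizes = [num_shards // num_jobs + (i < num_shards % num_jobs) for i in range(num_jobs)]
    ranges, start = [], 0
    for size in sizes:
        ranges.append(range(start, start + size))
        start += size
    return ranges
# _demo_distribute_shards(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]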
| 26 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : list[int | float] , _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
if len(_lowerCamelCase ) == 0:
raise ValueError("find_max() arg is an empty sequence" )
if (
left >= len(_lowerCamelCase )
or left < -len(_lowerCamelCase )
or right >= len(_lowerCamelCase )
or right < -len(_lowerCamelCase )
):
raise IndexError("list index out of range" )
if left == right:
return nums[left]
SCREAMING_SNAKE_CASE__ : Optional[int] = (left + right) >> 1 # the middle
SCREAMING_SNAKE_CASE__ : List[Any] = find_max(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # find max in range[left, mid]
SCREAMING_SNAKE_CASE__ : Optional[int] = find_max(_lowerCamelCase , mid + 1 , _lowerCamelCase ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
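# Illustrative addition (new code): the recursion above halves [left, right]
# on each call and still visits every element once, so it runs in O(n) with
# O(log n) stack depth. A plain linear scan gives the same answer and serves
# as a cross-check.
def find_max_iterative(nums: list[int | float]) -> int | float:
    if len(nums) == 0:
        raise ValueError("find_max_iterative() arg is an empty sequence")
    best = nums[0]
    for value in nums[1:]:
        if value > best:
            best = value
    return best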
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 26 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = AltDiffusionPipeline
snake_case_ = TEXT_TO_IMAGE_PARAMS
snake_case_ = TEXT_TO_IMAGE_BATCH_PARAMS
snake_case_ = TEXT_TO_IMAGE_IMAGE_PARAMS
snake_case_ = TEXT_TO_IMAGE_IMAGE_PARAMS
def A_ ( self : Optional[int] ) ->Dict:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=a , set_alpha_to_one=a , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_02 , )
SCREAMING_SNAKE_CASE__ : Tuple = CLIPTextModel(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 77
SCREAMING_SNAKE_CASE__ : str = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def A_ ( self : Tuple , a : List[str] , a : int=0 ) ->Union[str, Any]:
if str(a ).startswith("mps" ):
SCREAMING_SNAKE_CASE__ : str = torch.manual_seed(a )
else:
SCREAMING_SNAKE_CASE__ : List[str] = torch.Generator(device=a ).manual_seed(a )
SCREAMING_SNAKE_CASE__ : int = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def A_ ( self : int ) ->List[Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def A_ ( self : Optional[int] ) ->Optional[Any]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def A_ ( self : List[Any] ) ->int:
SCREAMING_SNAKE_CASE__ : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_components()
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : str = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
SCREAMING_SNAKE_CASE__ : Union[str, Any] = RobertaSeriesModelWithTransformation(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = text_encoder
SCREAMING_SNAKE_CASE__ : str = AltDiffusionPipeline(**a )
SCREAMING_SNAKE_CASE__ : str = alt_pipe.to(a )
alt_pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_inputs(a )
SCREAMING_SNAKE_CASE__ : Any = "A photo of an astronaut"
SCREAMING_SNAKE_CASE__ : List[Any] = alt_pipe(**a )
SCREAMING_SNAKE_CASE__ : str = output.images
SCREAMING_SNAKE_CASE__ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ : Optional[int] = np.array(
[0.574_8162, 0.6044_7145, 0.4882_1217, 0.5010_0636, 0.543_1185, 0.4576_3683, 0.4965_7696, 0.4813_2733, 0.4757_3093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A_ ( self : Tuple ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Tuple = PNDMScheduler(skip_prk_steps=a )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
SCREAMING_SNAKE_CASE__ : Any = RobertaSeriesModelWithTransformation(a )
SCREAMING_SNAKE_CASE__ : Any = text_encoder
SCREAMING_SNAKE_CASE__ : List[str] = AltDiffusionPipeline(**a )
SCREAMING_SNAKE_CASE__ : List[str] = alt_pipe.to(a )
alt_pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_inputs(a )
SCREAMING_SNAKE_CASE__ : Any = alt_pipe(**a )
SCREAMING_SNAKE_CASE__ : List[str] = output.images
SCREAMING_SNAKE_CASE__ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ : List[Any] = np.array(
[0.5160_5093, 0.570_7241, 0.4736_5507, 0.5057_8886, 0.563_3877, 0.464_2503, 0.518_2081, 0.4876_3484, 0.4908_4237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Optional[Any] ) ->str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : Optional[int] ) ->Union[str, Any]:
        # make sure that the PNDM scheduler skips the PRK steps
SCREAMING_SNAKE_CASE__ : Any = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , safety_checker=a )
SCREAMING_SNAKE_CASE__ : Optional[int] = alt_pipe.to(a )
alt_pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE__ : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple = alt_pipe([prompt] , generator=a , guidance_scale=6.0 , num_inference_steps=20 , output_type="np" )
SCREAMING_SNAKE_CASE__ : List[str] = output.images
SCREAMING_SNAKE_CASE__ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE__ : List[str] = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A_ ( self : List[str] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = DDIMScheduler.from_pretrained("BAAI/AltDiffusion" , subfolder="scheduler" )
SCREAMING_SNAKE_CASE__ : str = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , scheduler=a , safety_checker=a )
SCREAMING_SNAKE_CASE__ : Tuple = alt_pipe.to(a )
alt_pipe.set_progress_bar_config(disable=a )
SCREAMING_SNAKE_CASE__ : int = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE__ : Dict = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = alt_pipe([prompt] , generator=a , num_inference_steps=2 , output_type="numpy" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = output.images
SCREAMING_SNAKE_CASE__ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Dict = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
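# Illustrative sketch (new code, not part of the tests above): the
# device-aware seeding pattern used by `get_dummy_inputs`. MPS only supports
# the global RNG, while other devices accept a device-local `torch.Generator`.
def _demo_seeded_generator(device="cpu", seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)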
| 26 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowercase :str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : List[str] , a : Optional[int] , a : str , a : int=None , a : Optional[Any]=1 ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Dict = tokenizer
SCREAMING_SNAKE_CASE__ : Optional[int] = dataset
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(a ) if n_tasks is None else n_tasks
SCREAMING_SNAKE_CASE__ : Dict = n_copies
def __iter__( self : str ) ->Tuple:
SCREAMING_SNAKE_CASE__ : str = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
SCREAMING_SNAKE_CASE__ : int = self.tokenizer(a , padding=a , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Dict , a : int , a : int , a : Tuple ) ->Dict:
SCREAMING_SNAKE_CASE__ : Dict = start_length
SCREAMING_SNAKE_CASE__ : Any = eof_strings
SCREAMING_SNAKE_CASE__ : Any = tokenizer
def __call__( self : Any , a : Optional[int] , a : int , **a : Union[str, Any] ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
SCREAMING_SNAKE_CASE__ : int = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(a )
def UpperCAmelCase ( _lowerCamelCase : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = re.split("(%s)" % "|".join(_lowerCamelCase ) , _lowerCamelCase )
# last string should be ""
return "".join(string_list[:-2] )
def UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : str=20 , **_lowerCamelCase : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = batch["ids"].shape[-1]
SCREAMING_SNAKE_CASE__ : List[Any] = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
SCREAMING_SNAKE_CASE__ : Dict = batch["task_id"].repeat(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = accelerator.gather((generated_tokens, generated_tasks) )
SCREAMING_SNAKE_CASE__ : Dict = generated_tokens.cpu().numpy()
SCREAMING_SNAKE_CASE__ : Any = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = HfArgumentParser(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
SCREAMING_SNAKE_CASE__ : List[str] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
SCREAMING_SNAKE_CASE__ : str = "false"
if args.num_workers is None:
SCREAMING_SNAKE_CASE__ : Dict = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
SCREAMING_SNAKE_CASE__ : Dict = Accelerator()
set_seed(args.seed , device_specific=_lowerCamelCase )
# Load model and tokenizer
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.eos_token
SCREAMING_SNAKE_CASE__ : List[str] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
SCREAMING_SNAKE_CASE__ : List[Any] = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ),
}
# Load evaluation dataset and metric
SCREAMING_SNAKE_CASE__ : str = load_dataset("openai_humaneval" )
SCREAMING_SNAKE_CASE__ : Any = load_metric("code_eval" )
SCREAMING_SNAKE_CASE__ : Dict = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = args.n_samples // args.batch_size
SCREAMING_SNAKE_CASE__ : Dict = TokenizedDataset(_lowerCamelCase , human_eval["test"] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
# do not confuse args.batch_size, which is actually the num_return_sequences
SCREAMING_SNAKE_CASE__ : Optional[int] = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
SCREAMING_SNAKE_CASE__ : int = code_eval_metric.compute(references=[""] , predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for task in tqdm(range(_lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : List[Any] = human_eval["test"][task]["test"]
SCREAMING_SNAKE_CASE__ : List[Any] = f"""check({human_eval['test'][task]['entry_point']})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , "w" ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
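# Illustrative sketch (new code): how splitting on the EOF markers truncates a
# generation at the first top-level statement after the function body. The
# sample completion string is hypothetical.
def _demo_remove_last_block():
    eof_strings = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
    generation = "    return x + 1\nprint(check(candidate))"
    pieces = re.split("(%s)" % "|".join(eof_strings), generation)
    # pieces == ["    return x + 1", "\nprint", "(check(candidate))"]
    return "".join(pieces[:-2])  # keeps only the completed function body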
| 26 | 1 |
from torch import nn
def UpperCAmelCase ( _lowerCamelCase : Optional[Any] ):
'''simple docstring'''
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f"""Unsupported activation function: {act_fn}""" )
| 26 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase :str = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Union[str, Any] = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
__lowercase :str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 | 1 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__lowercase :List[Any] = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any]=None ):
'''simple docstring'''
require_version(deps[pkg] , _lowerCamelCase )
| 26 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : int , _lowerCamelCase : Optional[int] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = {}
if train_file is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [train_file]
if eval_file is not None:
SCREAMING_SNAKE_CASE__ : int = [eval_file]
if test_file is not None:
SCREAMING_SNAKE_CASE__ : int = [test_file]
SCREAMING_SNAKE_CASE__ : Optional[int] = datasets.load_dataset("csv" , data_files=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = list(ds[list(files.keys() )[0]].features.keys() )
SCREAMING_SNAKE_CASE__ : int = features_name.pop(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = list(set(ds[list(files.keys() )[0]][label_name] ) )
SCREAMING_SNAKE_CASE__ : List[str] = {label: i for i, label in enumerate(_lowerCamelCase )}
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ : Any = {}
if len(_lowerCamelCase ) == 1:
for k in files.keys():
SCREAMING_SNAKE_CASE__ : List[Any] = ds[k].map(
lambda _lowerCamelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" ) , batched=_lowerCamelCase , )
elif len(_lowerCamelCase ) == 2:
for k in files.keys():
SCREAMING_SNAKE_CASE__ : Any = ds[k].map(
lambda _lowerCamelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" , ) , batched=_lowerCamelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
SCREAMING_SNAKE_CASE__ : Tuple = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : List[Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
SCREAMING_SNAKE_CASE__ : int = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : Optional[int] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
SCREAMING_SNAKE_CASE__ : int = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : Optional[Any] = labelaid[ex[label_name]]
yield (d, label)
SCREAMING_SNAKE_CASE__ : Tuple = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
SCREAMING_SNAKE_CASE__ : Any = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
SCREAMING_SNAKE_CASE__ : Dict = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
SCREAMING_SNAKE_CASE__ : Dict = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
__lowercase :List[Any] = logging.getLogger(__name__)
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(metadata={"help": "Which column contains the label"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the training file"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the development file"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the test file"} )
snake_case_ = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
snake_case_ = field(
default=lowercase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=_lowerCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
SCREAMING_SNAKE_CASE__ : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(_lowerCamelCase ) , labelaid=_lowerCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
SCREAMING_SNAKE_CASE__ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(_lowerCamelCase : EvalPrediction ) -> Dict:
SCREAMING_SNAKE_CASE__ : Dict = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
SCREAMING_SNAKE_CASE__ : str = TFTrainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=_lowerCamelCase , eval_dataset=_lowerCamelCase , compute_metrics=_lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE__ : Dict = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
SCREAMING_SNAKE_CASE__ : str = trainer.evaluate()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(_lowerCamelCase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
results.update(_lowerCamelCase )
return results
if __name__ == "__main__":
main()
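# Illustrative sketch (new code, separate from the training entry point
# above): the generator-to-Dataset pattern used by get_tfds, shown with toy
# feature names and hard-coded examples.
def _demo_from_generator():
    input_names = ["input_ids"]
    def gen():
        yield {"input_ids": [1, 2, 3]}, 0
        yield {"input_ids": [4, 5, 6]}, 1
    ds = tf.data.Dataset.from_generator(
        gen,
        ({k: tf.int32 for k in input_names}, tf.int64),
        ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
    )
    return ds.apply(tf.data.experimental.assert_cardinality(2))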
| 26 | 1 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase :Union[str, Any] = logging.get_logger()
@dataclass
class _a :
"""simple docstring"""
snake_case_ = 42
snake_case_ = field(default_factory=lowercase__ )
snake_case_ = field(default_factory=lowercase__ )
def A_ ( self : Tuple , a : Dict , a : Tensor , a : Tensor ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(list(m.modules() ) ) == 1 or isinstance(a , nn.Convad ) or isinstance(a , nn.BatchNormad )
if has_not_submodules:
self.traced.append(a )
def __call__( self : List[str] , a : Tensor ) ->Any:
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(a )
[x.remove() for x in self.handles]
return self
@property
def A_ ( self : List[str] ) ->Optional[int]:
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda a : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class _a :
"""simple docstring"""
snake_case_ = 42
snake_case_ = 42
snake_case_ = 1
snake_case_ = field(default_factory=lowercase__ )
snake_case_ = field(default_factory=lowercase__ )
snake_case_ = True
def __call__( self : int , a : Tensor ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[int] = Tracker(self.dest )(a ).parametrized
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Tracker(self.src )(a ).parametrized
SCREAMING_SNAKE_CASE__ : Optional[Any] = list(filter(lambda a : type(a ) not in self.src_skip , a ) )
SCREAMING_SNAKE_CASE__ : Dict = list(filter(lambda a : type(a ) not in self.dest_skip , a ) )
if len(a ) != len(a ) and self.raise_if_mismatch:
raise Exception(
f"""Numbers of operations are different. Source module has {len(a )} operations while"""
f""" destination module has {len(a )}.""" )
for dest_m, src_m in zip(a , a ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f"""Transfered from={src_m} to={dest_m}""" )
class _a ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] , a : nn.Module ) ->int:
super().__init__()
SCREAMING_SNAKE_CASE__ : List[Tuple[str, nn.Module]] = []
# - get the stem
feature_blocks.append(("conv1", model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith("block" ), f"""Unexpected layer name {k}"""
SCREAMING_SNAKE_CASE__ : Tuple = len(a ) + 1
feature_blocks.append((f"""res{block_index}""", v) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = nn.ModuleDict(a )
def A_ ( self : List[Any] , a : Tensor ) ->Optional[Any]:
return get_trunk_forward_outputs(
a , out_feat_keys=a , feature_blocks=self._feature_blocks , )
class _a ( lowercase__ ):
"""simple docstring"""
def A_ ( self : Tuple , a : str ) ->str:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = x.split("-" )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self : List[str] , a : str ) ->Callable[[], Tuple[nn.Module, Dict]]:
# default to timm!
if x not in self:
SCREAMING_SNAKE_CASE__ : Dict = self.convert_name_to_timm(a )
SCREAMING_SNAKE_CASE__ : List[Any] = partial(lambda: (timm.create_model(a , pretrained=a ).eval(), None) )
else:
SCREAMING_SNAKE_CASE__ : Dict = super().__getitem__(a )
return val
class _a ( lowercase__ ):
"""simple docstring"""
def __getitem__( self : Union[str, Any] , a : str ) ->Callable[[], nn.Module]:
if "seer" in x and "in1k" not in x:
SCREAMING_SNAKE_CASE__ : Tuple = RegNetModel
else:
SCREAMING_SNAKE_CASE__ : Tuple = RegNetForImageClassification
return val
def UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int , _lowerCamelCase : List[Tuple[str, str]] ):
'''simple docstring'''
for from_key, to_key in keys:
SCREAMING_SNAKE_CASE__ : Tuple = from_state_dict[from_key].clone()
print(f"""Copied key={from_key} to={to_key}""" )
return to_state_dict
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : Callable[[], nn.Module] , _lowerCamelCase : Callable[[], nn.Module] , _lowerCamelCase : RegNetConfig , _lowerCamelCase : Path , _lowerCamelCase : bool = True , ):
'''simple docstring'''
print(f"""Converting {name}...""" )
with torch.no_grad():
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = from_model_func()
SCREAMING_SNAKE_CASE__ : Optional[Any] = our_model_func(_lowerCamelCase ).eval()
SCREAMING_SNAKE_CASE__ : int = ModuleTransfer(src=_lowerCamelCase , dest=_lowerCamelCase , raise_if_mismatch=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = torch.randn((1, 3, 224, 224) )
module_transfer(_lowerCamelCase )
if from_state_dict is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
SCREAMING_SNAKE_CASE__ : List[str] = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
SCREAMING_SNAKE_CASE__ : int = manually_copy_vissl_head(_lowerCamelCase , our_model.state_dict() , _lowerCamelCase )
our_model.load_state_dict(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = our_model(_lowerCamelCase , output_hidden_states=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
our_outputs.logits if isinstance(_lowerCamelCase , _lowerCamelCase ) else our_outputs.last_hidden_state
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = from_model(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = from_output[-1] if type(_lowerCamelCase ) is list else from_output
    # now since I don't want to use any config files, the vissl seer model doesn't actually have a head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
SCREAMING_SNAKE_CASE__ : Optional[Any] = our_outputs.hidden_states[-1]
assert torch.allclose(_lowerCamelCase , _lowerCamelCase ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add model" , use_temp_dir=_lowerCamelCase , )
SCREAMING_SNAKE_CASE__ : Tuple = 224 if "seer" not in name else 384
# we can use the convnext one
SCREAMING_SNAKE_CASE__ : Dict = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" , size=_lowerCamelCase )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add image processor" , use_temp_dir=_lowerCamelCase , )
print(f"""Pushed {name}""" )
def UpperCAmelCase ( _lowerCamelCase : Path , _lowerCamelCase : str = None , _lowerCamelCase : bool = True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "imagenet-1k-id2label.json"
SCREAMING_SNAKE_CASE__ : int = 1_000
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (1, num_labels)
SCREAMING_SNAKE_CASE__ : Dict = "huggingface/label-files"
SCREAMING_SNAKE_CASE__ : Tuple = num_labels
SCREAMING_SNAKE_CASE__ : Dict = json.load(open(cached_download(hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) ) , "r" ) )
SCREAMING_SNAKE_CASE__ : Any = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : str = idalabel
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : Dict = partial(_lowerCamelCase , num_labels=_lowerCamelCase , idalabel=_lowerCamelCase , labelaid=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1_008] , groups_width=48 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1_360] , groups_width=40 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1_624] , groups_width=56 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1_920] , groups_width=120 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2_048] , groups_width=128 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1_344, 2_520] , groups_width=168 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1_512] , groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1_088] , groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1_296] , groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2_016] , groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1_232, 3_024] , groups_width=112 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010 ),
}
SCREAMING_SNAKE_CASE__ : Optional[Any] = NameToOurModelFuncMap()
SCREAMING_SNAKE_CASE__ : Any = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(_lowerCamelCase : str , _lowerCamelCase : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
SCREAMING_SNAKE_CASE__ : List[Any] = torch.hub.load_state_dict_from_url(_lowerCamelCase , model_dir=str(_lowerCamelCase ) , map_location="cpu" )
SCREAMING_SNAKE_CASE__ : Dict = model_func()
# check if we have a head, if yes add it
SCREAMING_SNAKE_CASE__ : int = files["classy_state_dict"]["base_model"]["model"]
SCREAMING_SNAKE_CASE__ : Optional[int] = model_state_dict["trunk"]
model.load_state_dict(_lowerCamelCase )
return model.eval(), model_state_dict["heads"]
# pretrained
SCREAMING_SNAKE_CASE__ : Optional[int] = partial(
_lowerCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = partial(
_lowerCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = partial(
_lowerCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
SCREAMING_SNAKE_CASE__ : Dict = partial(
_lowerCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch" , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=1_010 , w_0=1_744 , w_a=6_2_0.8_3 , w_m=2.5_2 ) ) ) , )
# IN1K finetuned
SCREAMING_SNAKE_CASE__ : Any = partial(
_lowerCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE__ : List[Any] = partial(
_lowerCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = partial(
_lowerCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = partial(
_lowerCamelCase , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch" , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=1_010 , w_0=1_744 , w_a=6_2_0.8_3 , w_m=2.5_2 ) ) ) , )
    # `model_name`, `save_directory` and `push_to_hub` are the parameters of
    # the enclosing conversion function
    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
__lowercase :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
__lowercase :Optional[int] = parser.parse_args()
__lowercase :Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
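# A hypothetical invocation (the script file name and output folder are
# assumptions, not taken from this file):
#
#   python convert_regnet_seer_to_pytorch.py \
#       --model_name regnet-y-320-seer \
#       --pytorch_dump_folder_path ./converted-regnet
#
# Caveat on `--push_to_hub`: argparse's `type=bool` calls bool() on the raw
# string, and any non-empty string (including "False") is truthy, so the flag
# cannot actually be disabled from the command line with this definition.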
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
# NOTE: the original model-specific class name is not recoverable from this
# snippet; a generic name is used here.
class ImageProcessor(BaseImageProcessor):
    r"""Constructs an image processor that resizes, center-crops, rescales and normalizes images."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self, image: np.ndarray, size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
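# A minimal usage sketch (the class name is the reconstructed one above and the
# image path is hypothetical; `BaseImageProcessor.__call__` forwards to
# `preprocess`):
#
#   from PIL import Image
#
#   processor = ImageProcessor()
#   image = Image.open("example.jpg")
#   batch = processor(image, return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 3, 224, 224) after the center crop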
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, token_type_ids, attention_mask = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    # NOTE: the class and attribute names here are reconstructed to match the
    # usual transformers test conventions.
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50_000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
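# Tests decorated with @slow are skipped by default; the transformers test
# suite enables them through the RUN_SLOW environment variable, e.g. (the test
# path shown is the file's usual location in the transformers repo):
#
#   RUN_SLOW=1 pytest tests/models/roformer/test_modeling_flax_roformer.py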
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    # NOTE: the original class name is not recoverable from this snippet; the
    # name above follows the usual diffusers integration-test convention.
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
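# A minimal, self-contained sketch of the replicate/shard data-parallel pattern
# used by both tests above (shapes are illustrative only):
#
#   import jax
#   import jax.numpy as jnp
#   from flax.jax_utils import replicate
#   from flax.training.common_utils import shard
#
#   params = {"w": jnp.ones((2, 2))}
#   batch = jnp.ones((jax.device_count() * 4, 2))  # global batch
#
#   p_params = replicate(params)  # adds a leading device axis: one copy per device
#   sharded = shard(batch)        # splits the leading axis across devices
#   # sharded.shape == (jax.device_count(), 4, 2)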