"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
_DESCRIPTION = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (NeurIPS, 2021).\n\nThis metric is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
_KWARGS_DESCRIPTION = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of references, one for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
_DESCRIPTION = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights. Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
_CITATION = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
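# A quick sketch of the config above in use (values follow the defaults in __init__;
# the "tiny" variant below is a hypothetical example for experiments, not a released checkpoint):
# config = UMT5Config()
# config.hidden_size, config.num_attention_heads, config.num_hidden_layers  # (512, 6, 8)
# tiny = UMT5Config(num_layers=2, num_heads=2, d_ff=256)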
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def __A ( a_ : int , a_ : int )-> bool:
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def __A ( a_ : int )-> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : List[str] = 11
SCREAMING_SNAKE_CASE : Union[str, Any] = int('''1''' + '''0''' * digit_len )
for num in range(a_ , a_ ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(a_ , a_ ):
solutions.append(F"{num}/{den}" )
den += 1
num += 1
SCREAMING_SNAKE_CASE : Optional[Any] = 10
return solutions
def __A ( a_ : int = 2 )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 1.0
for fraction in fraction_list(a_ ):
SCREAMING_SNAKE_CASE : List[str] = Fraction(a_ )
result *= frac.denominator / frac.numerator
return int(a_ )
if __name__ == "__main__":
print(solution())
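    # Sanity check for the helpers above (Project Euler problem 33): the four non-trivial
    # digit-cancelling fractions, and the denominator of their product in lowest terms.
    assert fraction_list(2) == ["16/64", "19/95", "26/65", "49/98"]
    assert solution() == 100  # (1/4) * (1/5) * (2/5) * (1/2) = 1/100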
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )
            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)
    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )
    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )
    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
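# Sketch: this backbone config plugs into MaskFormer via
# `MaskFormerConfig.from_backbone_and_decoder_configs`, as the tests earlier in this
# document do. For the defaults, the derived channel width after the last stage is
# embed_dim * 2 ** (len(depths) - 1):
# backbone_config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
# backbone_config.hidden_size  # 768 == 96 * 2 ** 3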
"""simple docstring"""
import math
class lowercase__:
'''simple docstring'''
def __init__( self :Union[str, Any] , lowerCamelCase_ :List[str]=0 ) -> List[Any]: # a graph with Node 0,1,...,N-1
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = n
SCREAMING_SNAKE_CASE : List[Any] = [
[math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ )
] # adjacency matrix for weight
SCREAMING_SNAKE_CASE : Any = [
[math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ )
] # dp[i][j] stores minimum distance from i to j
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = w
def __lowerCAmelCase ( self :str ) -> Union[str, Any]:
'''simple docstring'''
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
SCREAMING_SNAKE_CASE : List[str] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return self.dp[u][v]
if __name__ == "__main__":
lowerCamelCase__ : Dict = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
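    # A caveat worth knowing about the implementation above: dp[i][i] is never
    # initialized to 0, so show_min(i, i) returns the shortest cycle through i rather
    # than 0. Zero the diagonal before calling floyd_warshall() if conventional
    # semantics are needed:
    # for i in range(graph.n):
    #     graph.dp[i][i] = 0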
"""simple docstring"""
def __A ( a_ : int , a_ : int )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = 1 # To kept the Calculated Value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
SCREAMING_SNAKE_CASE : Optional[Any] = n - k
# Calculate C(n,k)
for i in range(a_ ):
result *= n - i
result //= i + 1
return result
def __A ( a_ : int )-> int:
'''simple docstring'''
return binomial_coefficient(2 * node_count , a_ ) // (node_count + 1)
def __A ( a_ : int )-> int:
'''simple docstring'''
if n < 0:
raise ValueError('''factorial() not defined for negative values''' )
SCREAMING_SNAKE_CASE : Optional[int] = 1
for i in range(1 , n + 1 ):
result *= i
return result
def __A ( a_ : int )-> int:
'''simple docstring'''
return catalan_number(a_ ) * factorial(a_ )
if __name__ == "__main__":
lowerCamelCase__ : List[str] = int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError("We need some nodes to work with.")
print(
f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
f'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
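    # Quick sanity check of the helpers above (standard identities:
    # Catalan(3) = C(6, 3) / 4 = 5, and labeled binary trees on n nodes = Catalan(n) * n!):
    assert catalan_number(3) == 5
    assert binary_tree_count(3) == 30  # 5 * 3!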
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
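# Behavior sketch for the lazy-import pattern above: `sys.modules[__name__]` is replaced
# by a `_LazyModule`, so importing the package is cheap and the torch-backed submodules
# load only on first attribute access, e.g. (module path assumed from the four-dot
# relative import above, i.e. transformers/models/deprecated/mctct):
# from transformers.models.deprecated.mctct import MCTCTConfig  # loads configuration_mctct only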
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
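# Minimal usage sketch for the exports above (the checkpoint name is an assumption;
# any UnCLIP-compatible checkpoint works the same way):
# import torch
# from diffusers import UnCLIPPipeline
# pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16).to("cuda")
# image = pipe("a high-resolution photo of a corgi").images[0]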
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample=sample,
                timestep=timestep,
                encoder_hidden_states=encoder_hidden_states,
                controlnet_cond=image,
                conditioning_scale=scale,
                class_labels=class_labels,
                timestep_cond=timestep_cond,
                attention_mask=attention_mask,
                cross_attention_kwargs=cross_attention_kwargs,
                guess_mode=guess_mode,
                return_dict=return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
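# Usage sketch (checkpoint names are assumptions; any ControlNet checkpoints work):
# from diffusers import ControlNetModel
# canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
# pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
# multi = MultiControlNetModel([canny, pose])
# In forward(), each net runs on its own conditioning image and the residuals are
# summed, so conditioning_scale weights the control signals independently.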
"""simple docstring"""
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
    AlbertTokenizer,
    AutoTokenizer,
    BertTokenizer,
    BertTokenizerFast,
    GPT2TokenizerFast,
    is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowercase__( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = mock.Mock()
SCREAMING_SNAKE_CASE : Any = 5_00
SCREAMING_SNAKE_CASE : int = {}
SCREAMING_SNAKE_CASE : Union[str, Any] = HTTPError
SCREAMING_SNAKE_CASE : int = {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE : Union[str, Any] = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=lowerCamelCase_ ) as mock_head:
SCREAMING_SNAKE_CASE : List[str] = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def __lowerCAmelCase ( self :List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = mock.Mock()
SCREAMING_SNAKE_CASE : Any = 5_00
SCREAMING_SNAKE_CASE : Any = {}
SCREAMING_SNAKE_CASE : Union[str, Any] = HTTPError
SCREAMING_SNAKE_CASE : Optional[Any] = {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE : Optional[Any] = GPTaTokenizerFast.from_pretrained('''gpt2''' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=lowerCamelCase_ ) as mock_head:
SCREAMING_SNAKE_CASE : Optional[Any] = GPTaTokenizerFast.from_pretrained('''gpt2''' )
# This check we did call the fake head request
mock_head.assert_called()
def __lowerCAmelCase ( self :Any ) -> Optional[Any]:
'''simple docstring'''
try:
SCREAMING_SNAKE_CASE : Optional[int] = tempfile.mktemp()
with open(lowerCamelCase_ , '''wb''' ) as f:
http_get('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = AlbertTokenizer.from_pretrained(lowerCamelCase_ )
finally:
os.remove(lowerCamelCase_ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('''tokenizer.json''' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('''tokenizer.json''' , '''wb''' ) as f:
http_get('''https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json''' , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
            # The tiny random BERT has a vocab size of 1024; tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 10_00 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('''tokenizer.json''' )
def __lowerCAmelCase ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = AlbertTokenizer.from_pretrained('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' )
@is_staging_test
class lowercase__( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def __lowerCAmelCase ( cls :Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def __lowerCAmelCase ( cls :int ) -> Any:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='''test-tokenizer''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-tokenizer-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-tokenizer''' )
except HTTPError:
pass
def __lowerCAmelCase ( self :str ) -> Tuple:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(lowerCamelCase_ , '''vocab.txt''' )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE : Tuple = BertTokenizer(lowerCamelCase_ )
tokenizer.push_to_hub('''test-tokenizer''' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE : Optional[Any] = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='''test-tokenizer''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase_ , repo_id='''test-tokenizer''' , push_to_hub=lowerCamelCase_ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE : Optional[int] = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def __lowerCAmelCase ( self :Optional[int] ) -> str:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE : int = os.path.join(lowerCamelCase_ , '''vocab.txt''' )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = BertTokenizer(lowerCamelCase_ )
tokenizer.push_to_hub('''valid_org/test-tokenizer-org''' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE : Dict = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-tokenizer-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
lowerCamelCase_ , repo_id='''valid_org/test-tokenizer-org''' , push_to_hub=lowerCamelCase_ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE : Union[str, Any] = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def __lowerCAmelCase ( self :Optional[Any] ) -> int:
'''simple docstring'''
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(lowerCamelCase_ , '''vocab.txt''' )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE : Optional[Any] = CustomTokenizer(lowerCamelCase_ )
# No fast custom tokenizer
tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer" , trust_remote_code=lowerCamelCase_ )
        # Can't make an isinstance check because the tokenizer here comes from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE : Dict = os.path.join(lowerCamelCase_ , '''vocab.txt''' )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE : Dict = BertTokenizerFast.from_pretrained(lowerCamelCase_ )
bert_tokenizer.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = CustomTokenizerFast.from_pretrained(lowerCamelCase_ )
tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer" , trust_remote_code=lowerCamelCase_ )
        # Can't make an isinstance check because the tokenizer here comes from the CustomTokenizerFast class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizerFast''' )
SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(
f"{USER}/test-dynamic-tokenizer" , use_fast=lowerCamelCase_ , trust_remote_code=lowerCamelCase_ )
        # Can't make an isinstance check because the tokenizer here comes from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' )
class lowercase__( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = Trie()
trie.add('''Hello 友達''' )
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
trie.add('''Hello''' )
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {'''''': 1, ''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
def __lowerCAmelCase ( self :str ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = Trie()
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS] This is a extra_id_100'''] )
trie.add('''[CLS]''' )
trie.add('''extra_id_1''' )
trie.add('''extra_id_100''' )
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS]''', ''' This is a ''', '''extra_id_100'''] )
def __lowerCAmelCase ( self :Optional[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = Trie()
trie.add('''A''' )
self.assertEqual(trie.split('''ABC''' ) , ['''A''', '''BC'''] )
self.assertEqual(trie.split('''BCA''' ) , ['''BC''', '''A'''] )
def __lowerCAmelCase ( self :int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = Trie()
trie.add('''TOKEN]''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def __lowerCAmelCase ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = Trie()
trie.add('''A''' )
trie.add('''P''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def __lowerCAmelCase ( self :int ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = Trie()
trie.add('''AB''' )
trie.add('''B''' )
trie.add('''C''' )
self.assertEqual(trie.split('''ABC''' ) , ['''AB''', '''C'''] )
def __lowerCAmelCase ( self :Dict ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = Trie()
trie.add('''ABC''' )
trie.add('''B''' )
trie.add('''CD''' )
self.assertEqual(trie.split('''ABCD''' ) , ['''ABC''', '''D'''] )
def __lowerCAmelCase ( self :Optional[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = Trie()
SCREAMING_SNAKE_CASE : Optional[int] = trie.cut_text('''ABC''' , [0, 0, 2, 1, 2, 3] )
self.assertEqual(lowerCamelCase_ , ['''AB''', '''C'''] )
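The trie assertions above expect a nested-dict layout in which an empty-string key marks a complete word. Below is a minimal sketch of how add() could build that shape; it is an illustration, not the transformers Trie implementation.

class MiniTrie:
    def __init__(self):
        self.data = {}

    def add(self, word):
        node = self.data
        for ch in word:
            node = node.setdefault(ch, {})
        node[""] = 1  # terminator key, matching the {'': 1} leaves asserted above

trie = MiniTrie()
trie.add("AB")
assert trie.data == {"A": {"B": {"": 1}}}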
| 18 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def __A ( a_ : float , a_ : float , a_ : bool = False )-> list[float]:
'''simple docstring'''
if radian_mode:
return [magnitude * cos(a_ ), magnitude * sin(a_ )]
return [magnitude * cos(radians(a_ ) ), magnitude * sin(radians(a_ ) )]
def __A ( a_ : NDArray[floataa] , a_ : NDArray[floataa] , a_ : float = 10**-1 )-> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE : NDArray[floataa] = cross(a_ , a_ )
SCREAMING_SNAKE_CASE : float = sum(a_ )
return abs(a_ ) < eps
if __name__ == "__main__":
# Test to check if it works
lowerCamelCase__ : Optional[Any] = array(
[
polar_force(7_1_8.4, 180 - 30),
polar_force(8_7_9.5_4, 45),
polar_force(100, -90),
]
)
lowerCamelCase__ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
lowerCamelCase__ : Union[str, Any] = array(
[
polar_force(30 * 9.8_1, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
lowerCamelCase__ : Any = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
lowerCamelCase__ : Union[str, Any] = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
lowerCamelCase__ : Optional[int] = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
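As a quick numeric check of the polar-to-Cartesian conversion used above: a force of magnitude 10 at 45 degrees should split evenly into its two components.

from math import cos, sin, radians

magnitude, angle = 10.0, 45.0
fx = magnitude * cos(radians(angle))
fy = magnitude * sin(radians(angle))
assert abs(fx - 7.0710678) < 1e-6 and abs(fy - 7.0710678) < 1e-6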
| 18 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : Dict = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {
"facebook/deit-base-distilled-patch16-224": (
"https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """deit"""
def __init__( self :int , lowerCamelCase_ :List[str]=7_68 , lowerCamelCase_ :List[str]=12 , lowerCamelCase_ :Any=12 , lowerCamelCase_ :str=30_72 , lowerCamelCase_ :Tuple="gelu" , lowerCamelCase_ :Union[str, Any]=0.0 , lowerCamelCase_ :Optional[Any]=0.0 , lowerCamelCase_ :Optional[int]=0.0_2 , lowerCamelCase_ :List[str]=1E-12 , lowerCamelCase_ :Union[str, Any]=2_24 , lowerCamelCase_ :Tuple=16 , lowerCamelCase_ :Optional[int]=3 , lowerCamelCase_ :Optional[int]=True , lowerCamelCase_ :Tuple=16 , **lowerCamelCase_ :str , ) -> Tuple:
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : Any = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = initializer_range
SCREAMING_SNAKE_CASE : int = layer_norm_eps
SCREAMING_SNAKE_CASE : Any = image_size
SCREAMING_SNAKE_CASE : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE : Optional[int] = num_channels
SCREAMING_SNAKE_CASE : List[str] = qkv_bias
SCREAMING_SNAKE_CASE : Tuple = encoder_stride
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = version.parse("""1.11""" )
@property
def __lowerCAmelCase ( self :Any ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __lowerCAmelCase ( self :Dict ) -> float:
'''simple docstring'''
return 1E-4
| 18 |
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowerCamelCase__ : Optional[Any] = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCamelCase__ : Optional[int] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase__ : Optional[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def __A ( a_ : str , a_ : str )-> tuple[str, float]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = len([g for position, g in enumerate(a_ ) if g == main_target[position]] )
return (item, float(a_ ))
def __A ( a_ : str , a_ : str )-> tuple[str, str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = random.randint(0 , len(a_ ) - 1 )
SCREAMING_SNAKE_CASE : str = parent_a[:random_slice] + parent_a[random_slice:]
SCREAMING_SNAKE_CASE : Dict = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def __A ( a_ : str , a_ : list[str] )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = list(a_ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
SCREAMING_SNAKE_CASE : Any = random.choice(a_ )
return "".join(a_ )
def __A ( a_ : tuple[str, float] , a_ : list[tuple[str, float]] , a_ : list[str] , )-> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = []
# Generate more children proportionally to the fitness score.
SCREAMING_SNAKE_CASE : List[str] = int(parent_a[1] * 1_00 ) + 1
SCREAMING_SNAKE_CASE : Optional[Any] = 10 if child_n >= 10 else child_n
for _ in range(a_ ):
SCREAMING_SNAKE_CASE : List[str] = population_score[random.randint(0 , a_ )][0]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = crossover(parent_a[0] , a_ )
# Append new string to the population list.
pop.append(mutate(a_ , a_ ) )
pop.append(mutate(a_ , a_ ) )
return pop
def __A ( a_ : str , a_ : list[str] , a_ : bool = True )-> tuple[int, int, str]:
'''simple docstring'''
if N_POPULATION < N_SELECTED:
SCREAMING_SNAKE_CASE : List[Any] = F"{N_POPULATION} must be bigger than {N_SELECTED}"
raise ValueError(a_ )
# Verify that the target contains no genes besides the ones inside genes variable.
SCREAMING_SNAKE_CASE : List[str] = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
SCREAMING_SNAKE_CASE : str = F"{not_in_genes_list} is not in genes list, evolution cannot converge"
raise ValueError(a_ )
# Generate random starting population.
SCREAMING_SNAKE_CASE : Tuple = []
for _ in range(a_ ):
population.append(''''''.join([random.choice(a_ ) for i in range(len(a_ ) )] ) )
    # Just some logs to show what the algorithm is doing.
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(a_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        SCREAMING_SNAKE_CASE : int = [evaluate(item , a_ ) for item in population]
# Check if there is a matching evolution.
        SCREAMING_SNAKE_CASE : List[Any] = sorted(a_ , key=lambda x : x[1] , reverse=a_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"\nGeneration: {generation}"
F"\nTotal Population:{total_population}"
F"\nBest score: {population_score[0][1]}"
F"\nBest string: {population_score[0][0]}" )
# Flush the old population, keeping some of the best evolutions.
        # Keeping them avoids regression of the evolution.
SCREAMING_SNAKE_CASE : Optional[Any] = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(a_ )
# Normalize population score to be between 0 and 1.
SCREAMING_SNAKE_CASE : Optional[int] = [
(item, score / len(a_ )) for item, score in population_score
]
# This is selection
for i in range(a_ ):
            population.extend(select(population_score[int(i )] , a_ , a_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(a_ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCamelCase__ : Dict = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCamelCase__ : int = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
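The commented-out block inside the evaluation loop sketches thread-based scoring. A self-contained version of that idea follows; evaluate here is a simplified stand-in for the (item, score) scorer, and NUM_WORKERS is an assumed constant, not part of the original. As the original comment notes, for a scoring function this cheap the thread-pool overhead will usually make it slower, not faster.

import concurrent.futures

NUM_WORKERS = 4

def evaluate(item, target="target"):
    score = sum(a == b for a, b in zip(item, target))
    return (item, float(score))

population = ["tarzan", "targes", "tardet"]
with concurrent.futures.ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
    population_score = list(executor.map(evaluate, population))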
| 18 | 1 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def __A ( a_ : NDArray[floataa] , a_ : NDArray[floataa] , a_ : list[int] , a_ : int , )-> list[float]:
'''simple docstring'''
    rowsa, colsa = coefficient_matrix.shape
    rowsb, colsb = constant_matrix.shape
    if rowsa != colsa:
        SCREAMING_SNAKE_CASE : str = F"Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"
        raise ValueError(a_ )
    if colsb != 1:
        SCREAMING_SNAKE_CASE : Optional[int] = F"Constant matrix must be nx1 but received {rowsb}x{colsb}"
        raise ValueError(a_ )
    if rowsa != rowsb:
        SCREAMING_SNAKE_CASE : List[Any] = (
            '''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
            F"received {rowsa}x{colsa} and {rowsb}x{colsb}"
)
raise ValueError(a_ )
    if len(init_val ) != rowsa:
        SCREAMING_SNAKE_CASE : List[str] = (
            '''Number of initial values must be equal to number of rows in coefficient '''
            F"matrix but received {len(init_val )} and {rowsa}"
)
raise ValueError(a_ )
if iterations <= 0:
raise ValueError('''Iterations must be at least 1''' )
SCREAMING_SNAKE_CASE : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
    rows, cols = table.shape
    strictly_diagonally_dominant(a_ )
    # Iterate over the whole matrix for the given number of iterations
    for _ in range(a_ ):
        SCREAMING_SNAKE_CASE : List[Any] = []
        for row in range(rows ):
            SCREAMING_SNAKE_CASE : Optional[int] = 0
            for col in range(cols ):
if col == row:
SCREAMING_SNAKE_CASE : List[str] = table[row][col]
elif col == cols - 1:
SCREAMING_SNAKE_CASE : Tuple = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
SCREAMING_SNAKE_CASE : Optional[Any] = (temp + val) / denom
new_val.append(a_ )
SCREAMING_SNAKE_CASE : List[Any] = new_val
    return [float(i ) for i in new_val]
def __A ( a_ : NDArray[floataa] )-> bool:
'''simple docstring'''
    rows, cols = table.shape
    SCREAMING_SNAKE_CASE : Any = True
    for i in range(0 , rows ):
SCREAMING_SNAKE_CASE : Dict = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
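A hypothetical usage sketch (the names below are assumed, since the function names above are placeholders): solving 4x + y = 2 and x + 3y = -2, whose coefficient matrix is strictly diagonally dominant, so Jacobi iteration converges to x = 8/11, y = -10/11.

import numpy as np

coefficients = np.array([[4.0, 1.0], [1.0, 3.0]])
constants = np.array([2.0, -2.0])
x = np.zeros(2)
diagonal = np.diag(coefficients)
off_diagonal = coefficients - np.diag(diagonal)
for _ in range(25):  # fixed iteration count, as in the function above
    x = (constants - off_diagonal @ x) / diagonal
print(x)  # approximately [0.7273, -0.9091]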
| 18 |
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
lowerCamelCase__ : Optional[Any] = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __A ( a_ : Optional[int] )-> Dict:
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def __A ( a_ : List[Any] , a_ : Optional[int] , a_ : Optional[int] )-> Dict:
'''simple docstring'''
    return max(metric_fn(a_ , gt ) for gt in ground_truths )
def __A ( a_ : List[Any] , a_ : Union[str, Any] , a_ : str )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Optional[Any] = []
if args.gold_data_mode == "qa":
SCREAMING_SNAKE_CASE : List[Any] = pd.read_csv(a_ , sep='''\t''' , header=a_ )
for answer_list in data[1]:
SCREAMING_SNAKE_CASE : str = ast.literal_eval(a_ )
answers.append(a_ )
else:
SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = [[reference] for reference in references]
SCREAMING_SNAKE_CASE : Dict = 0
for prediction, ground_truths in zip(a_ , a_ ):
total += 1
em += metric_max_over_ground_truths(a_ , a_ , a_ )
fa += metric_max_over_ground_truths(a_ , a_ , a_ )
SCREAMING_SNAKE_CASE : Any = 100.0 * em / total
SCREAMING_SNAKE_CASE : Optional[int] = 100.0 * fa / total
logger.info(F"F1: {fa:.2f}" )
logger.info(F"EM: {em:.2f}" )
def __A ( a_ : Any , a_ : Any , a_ : List[Any] )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = args.k
SCREAMING_SNAKE_CASE : Tuple = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Union[str, Any] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = 0
for hypo, reference in zip(a_ , a_ ):
SCREAMING_SNAKE_CASE : Optional[int] = set(hypo.split('''\t''' )[:k] )
SCREAMING_SNAKE_CASE : List[str] = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
SCREAMING_SNAKE_CASE : Dict = 100.0 * em / total
logger.info(F"Precision@{k}: {em: .2f}" )
def __A ( a_ : Any , a_ : List[str] , a_ : str )-> int:
'''simple docstring'''
    def strip_title(title : Optional[Any] ):
if title.startswith('''"''' ):
SCREAMING_SNAKE_CASE : Tuple = title[1:]
if title.endswith('''"''' ):
SCREAMING_SNAKE_CASE : Any = title[:-1]
return title
SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ , )['''input_ids'''].to(args.device )
SCREAMING_SNAKE_CASE : Any = rag_model.rag.question_encoder(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = question_enc_outputs[0]
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever(
a_ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE : Any = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
SCREAMING_SNAKE_CASE : Dict = []
for docs in all_docs:
        SCREAMING_SNAKE_CASE : List[Any] = [strip_title(title ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(a_ ) )
return provenance_strings
def __A ( a_ : List[Any] , a_ : int , a_ : str )-> Tuple:
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ )
SCREAMING_SNAKE_CASE : Dict = inputs_dict.input_ids.to(args.device )
SCREAMING_SNAKE_CASE : Any = inputs_dict.attention_mask.to(args.device )
SCREAMING_SNAKE_CASE : Tuple = rag_model.generate( # rag_model overwrites generate
a_ , attention_mask=a_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=a_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.generator_tokenizer.batch_decode(a_ , skip_special_tokens=a_ )
if args.print_predictions:
for q, a in zip(a_ , a_ ):
logger.info('''Q: {} - A: {}'''.format(a_ , a_ ) )
return answers
def __A ( )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=a_ , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=a_ , choices=['''exact''', '''compressed''', '''legacy'''] , type=a_ , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=a_ , type=a_ , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=a_ , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=a_ , type=a_ , required=a_ , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=a_ , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=a_ , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=a_ , type=a_ , required=a_ , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=a_ , type=a_ , required=a_ , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=a_ , choices=['''qa''', '''ans'''] , help=(
            '''Format of the gold data file. '''
            '''qa - a single line in the following format: question [tab] answer_list. '''
            '''ans - a single line of the gold file contains the expected answer string.'''
) , )
parser.add_argument(
'''--predictions_path''' , type=a_ , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
        '''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=a_ , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=a_ , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=a_ , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=a_ , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
        '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retrieved while generating.''' , )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
SCREAMING_SNAKE_CASE : Dict = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def __A ( a_ : Optional[Any] )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = {}
if args.model_type is None:
SCREAMING_SNAKE_CASE : List[str] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : List[str] = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs
if args.index_name is not None:
SCREAMING_SNAKE_CASE : Tuple = args.index_name
if args.index_path is not None:
SCREAMING_SNAKE_CASE : List[Any] = args.index_path
else:
SCREAMING_SNAKE_CASE : str = BartForConditionalGeneration
SCREAMING_SNAKE_CASE : Optional[int] = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , a_ )
SCREAMING_SNAKE_CASE : int = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
SCREAMING_SNAKE_CASE : str = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(a_ , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(a_ ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : Dict = RagRetriever.from_pretrained(a_ , **a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class.from_pretrained(a_ , retriever=a_ , **a_ )
model.retriever.init_retrieval()
else:
SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(a_ , **a_ )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
SCREAMING_SNAKE_CASE : Dict = []
for line in tqdm(a_ ):
questions.append(line.strip() )
if len(a_ ) == args.eval_batch_size:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) + '''\n''' )
preds_file.flush()
SCREAMING_SNAKE_CASE : Union[str, Any] = []
if len(a_ ) > 0:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) )
preds_file.flush()
score_fn(a_ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
lowerCamelCase__ : List[str] = get_args()
main(args)
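A minimal sketch of the "max over ground truths" scoring this script relies on, with a simplified exact-match metric standing in for the imported exact_match_score/fa_score:

def exact_match(prediction, ground_truth):
    return float(prediction.strip().lower() == ground_truth.strip().lower())

def max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)

assert max_over_ground_truths(exact_match, "Paris", ["paris", "Lyon"]) == 1.0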
| 18 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ : Optional[Any] = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[Any] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Dict = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
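A rough sketch of the lazy-import idea behind _LazyModule: attributes resolve to real imports on first access instead of at import time. This is an illustrative toy, not the transformers implementation.

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute back to the module that defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)

lazy = LazyModule("toy", {"json": ["dumps"], "math": ["sqrt"]})
assert lazy.sqrt(9) == 3.0  # math is only imported here, on first access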
| 18 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {"vocab_file": "vocab.json"}
lowerCamelCase__ : Dict = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
lowerCamelCase__ : Optional[Any] = {"mgp-str": 27}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[int]="[GO]" , lowerCamelCase_ :int="[GO]" , lowerCamelCase_ :str="[s]" , lowerCamelCase_ :Dict="[GO]" , **lowerCamelCase_ :List[str] ) -> Tuple:
'''simple docstring'''
super().__init__(
unk_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , **lowerCamelCase_ , )
with open(lowerCamelCase_ , encoding='''utf-8''' ) as vocab_handle:
SCREAMING_SNAKE_CASE : int = json.load(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in self.vocab.items()}
@property
def __lowerCAmelCase ( self :int ) -> Dict:
'''simple docstring'''
return len(self.vocab )
def __lowerCAmelCase ( self :List[str] ) -> Dict:
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Optional[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
        for s in lowerCamelCase_:
            char_tokens.extend(s )
return char_tokens
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Tuple ) -> Optional[int]:
'''simple docstring'''
return self.vocab.get(lowerCamelCase_ , self.vocab.get(self.unk_token ) )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Dict ) -> Optional[int]:
'''simple docstring'''
return self.decoder.get(lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(lowerCamelCase_ ) )
return
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + '''\n''' )
return (vocab_file,)
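A hypothetical round trip through a character-level vocab like the one this tokenizer loads; the tiny vocab below is made up for illustration.

vocab = {"[GO]": 0, "a": 1, "b": 2, "c": 3}
decoder = {v: k for k, v in vocab.items()}

text = "abc"
ids = [vocab.get(ch, vocab["[GO]"]) for ch in text]  # unknown chars fall back to [GO]
assert ids == [1, 2, 3]
assert "".join(decoder[i] for i in ids) == "abc"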
| 18 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """gpt_bigcode"""
UpperCamelCase = ["""past_key_values"""]
UpperCamelCase = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self :Optional[Any] , lowerCamelCase_ :List[str]=5_02_57 , lowerCamelCase_ :Optional[int]=10_24 , lowerCamelCase_ :Union[str, Any]=7_68 , lowerCamelCase_ :int=12 , lowerCamelCase_ :Optional[int]=12 , lowerCamelCase_ :Any=None , lowerCamelCase_ :Tuple="gelu_pytorch_tanh" , lowerCamelCase_ :Dict=0.1 , lowerCamelCase_ :Dict=0.1 , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :int=1E-5 , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :Optional[Any]=True , lowerCamelCase_ :Union[str, Any]=5_02_56 , lowerCamelCase_ :int=5_02_56 , lowerCamelCase_ :str=True , lowerCamelCase_ :List[str]=True , lowerCamelCase_ :List[str]=True , **lowerCamelCase_ :Optional[Any] , ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Dict = n_positions
SCREAMING_SNAKE_CASE : Optional[int] = n_embd
SCREAMING_SNAKE_CASE : List[str] = n_layer
SCREAMING_SNAKE_CASE : Union[str, Any] = n_head
SCREAMING_SNAKE_CASE : List[str] = n_inner
SCREAMING_SNAKE_CASE : int = activation_function
SCREAMING_SNAKE_CASE : Optional[Any] = resid_pdrop
SCREAMING_SNAKE_CASE : str = embd_pdrop
SCREAMING_SNAKE_CASE : Optional[int] = attn_pdrop
SCREAMING_SNAKE_CASE : Any = layer_norm_epsilon
SCREAMING_SNAKE_CASE : int = initializer_range
SCREAMING_SNAKE_CASE : str = scale_attn_weights
SCREAMING_SNAKE_CASE : List[Any] = use_cache
SCREAMING_SNAKE_CASE : str = attention_softmax_in_fpaa
SCREAMING_SNAKE_CASE : List[str] = scale_attention_softmax_in_fpaa
SCREAMING_SNAKE_CASE : List[str] = multi_query
SCREAMING_SNAKE_CASE : Optional[Any] = bos_token_id
SCREAMING_SNAKE_CASE : Optional[int] = eos_token_id
super().__init__(bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
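A minimal sketch of the attribute aliasing that attribute_map enables, so reads of config.hidden_size reach config.n_embd; this is an illustration, not the transformers PretrainedConfig code.

class AliasedConfig:
    attribute_map = {"hidden_size": "n_embd"}

    def __init__(self, n_embd):
        self.n_embd = n_embd

    def __getattr__(self, name):
        # Only called when normal lookup fails, so aliases resolve lazily.
        if name in type(self).attribute_map:
            return getattr(self, type(self).attribute_map[name])
        raise AttributeError(name)

config = AliasedConfig(n_embd=768)
assert config.hidden_size == 768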
| 18 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """luke"""
def __init__( self :Optional[int] , lowerCamelCase_ :Union[str, Any]=5_02_67 , lowerCamelCase_ :int=50_00_00 , lowerCamelCase_ :Tuple=7_68 , lowerCamelCase_ :List[str]=2_56 , lowerCamelCase_ :Dict=12 , lowerCamelCase_ :Optional[int]=12 , lowerCamelCase_ :Optional[Any]=30_72 , lowerCamelCase_ :List[Any]="gelu" , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :List[str]=5_12 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Tuple=0.0_2 , lowerCamelCase_ :Optional[int]=1E-12 , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :List[str]=None , lowerCamelCase_ :str=1 , lowerCamelCase_ :Any=0 , lowerCamelCase_ :str=2 , **lowerCamelCase_ :List[Any] , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : List[str] = entity_vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = entity_emb_size
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[int] = use_entity_aware_attention
SCREAMING_SNAKE_CASE : List[str] = classifier_dropout
| 18 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Union[str, Any] , *lowerCamelCase_ :Union[str, Any] , **lowerCamelCase_ :List[str] ) -> int:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :str , *lowerCamelCase_ :int , **lowerCamelCase_ :str ) -> int:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Any , *lowerCamelCase_ :int , **lowerCamelCase_ :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :str , *lowerCamelCase_ :int , **lowerCamelCase_ :List[str] ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Optional[int] , *lowerCamelCase_ :Optional[Any] , **lowerCamelCase_ :Dict ) -> str:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Optional[int] , *lowerCamelCase_ :Union[str, Any] , **lowerCamelCase_ :Tuple ) -> Dict:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :List[Any] , *lowerCamelCase_ :int , **lowerCamelCase_ :Tuple ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :List[Any] , *lowerCamelCase_ :List[Any] , **lowerCamelCase_ :List[Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :List[str] , *lowerCamelCase_ :Dict , **lowerCamelCase_ :List[str] ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :int , *lowerCamelCase_ :str , **lowerCamelCase_ :Optional[Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :str , *lowerCamelCase_ :List[Any] , **lowerCamelCase_ :Optional[Any] ) -> str:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Dict , *lowerCamelCase_ :Optional[int] , **lowerCamelCase_ :int ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Tuple , *lowerCamelCase_ :List[str] , **lowerCamelCase_ :Dict ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :List[Any] , *lowerCamelCase_ :Union[str, Any] , **lowerCamelCase_ :List[Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Dict , *lowerCamelCase_ :Optional[int] , **lowerCamelCase_ :int ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Dict , *lowerCamelCase_ :Tuple , **lowerCamelCase_ :Any ) -> int:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :List[str] , *lowerCamelCase_ :Tuple , **lowerCamelCase_ :Any ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Any , *lowerCamelCase_ :Union[str, Any] , **lowerCamelCase_ :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :int , *lowerCamelCase_ :Optional[Any] , **lowerCamelCase_ :Tuple ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Tuple , *lowerCamelCase_ :Optional[Any] , **lowerCamelCase_ :Optional[Any] ) -> str:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Tuple , *lowerCamelCase_ :Tuple , **lowerCamelCase_ :Tuple ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :int , *lowerCamelCase_ :Optional[Any] , **lowerCamelCase_ :int ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :List[Any] , *lowerCamelCase_ :int , **lowerCamelCase_ :str ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Dict , *lowerCamelCase_ :int , **lowerCamelCase_ :List[Any] ) -> str:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Union[str, Any] , *lowerCamelCase_ :str , **lowerCamelCase_ :Any ) -> str:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :str , *lowerCamelCase_ :str , **lowerCamelCase_ :Any ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Optional[int] , *lowerCamelCase_ :Optional[Any] , **lowerCamelCase_ :Any ) -> int:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :str , *lowerCamelCase_ :Optional[Any] , **lowerCamelCase_ :List[str] ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Optional[int] , *lowerCamelCase_ :Union[str, Any] , **lowerCamelCase_ :Any ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Union[str, Any] , *lowerCamelCase_ :Dict , **lowerCamelCase_ :Tuple ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__( metaclass=_UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""sentencepiece"""]
def __init__( self :Optional[int] , *lowerCamelCase_ :Tuple , **lowerCamelCase_ :int ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
| 18 |
"""simple docstring"""
def __A ( a_ : list , a_ : int , a_ : int = 0 , a_ : int = 0 )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = right or len(a_ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(a_ , a_ , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
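A plain-named sketch of the two-end recursive search above (names assumed): each call compares the leftmost and rightmost elements, then shrinks the window from both sides, so it stays O(n) while halving the recursion depth. Using a None sentinel for right also avoids the `right or len(...) - 1` pitfall above, where an explicit right=0 would be treated as unset.

def two_end_search(data, key, left=0, right=None):
    right = len(data) - 1 if right is None else right
    if left > right:
        return -1
    if data[left] == key:
        return left
    if data[right] == key:
        return right
    return two_end_search(data, key, left + 1, right - 1)

assert two_end_search([4, 8, 15, 16, 23, 42], 23) == 4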
| 18 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class lowercase__:
'''simple docstring'''
def __init__( self :Dict , lowerCamelCase_ :Union[str, Any] , ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : Tuple = 13
SCREAMING_SNAKE_CASE : Union[str, Any] = 7
SCREAMING_SNAKE_CASE : Optional[Any] = True
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : Optional[int] = True
SCREAMING_SNAKE_CASE : int = 99
SCREAMING_SNAKE_CASE : Optional[Any] = 32
SCREAMING_SNAKE_CASE : Optional[Any] = 2
SCREAMING_SNAKE_CASE : Optional[int] = 4
SCREAMING_SNAKE_CASE : Dict = 37
SCREAMING_SNAKE_CASE : Union[str, Any] = '''gelu'''
SCREAMING_SNAKE_CASE : Optional[Any] = 0.1
SCREAMING_SNAKE_CASE : Union[str, Any] = 0.1
SCREAMING_SNAKE_CASE : Optional[int] = 5_12
SCREAMING_SNAKE_CASE : Union[str, Any] = 16
SCREAMING_SNAKE_CASE : List[Any] = 2
SCREAMING_SNAKE_CASE : Dict = 0.0_2
SCREAMING_SNAKE_CASE : List[str] = 3
SCREAMING_SNAKE_CASE : Optional[int] = 4
SCREAMING_SNAKE_CASE : Dict = None
def __lowerCAmelCase ( self :List[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : Dict = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :List[str] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Union[str, Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = TFEsmModel(config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = [input_ids, input_mask]
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :str , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Dict , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :Tuple , lowerCamelCase_ :str , ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : Tuple = TFEsmModel(config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''encoder_hidden_states''': encoder_hidden_states,
'''encoder_attention_mask''': encoder_attention_mask,
}
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = [input_ids, input_mask]
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ )
# Also check the case where encoder outputs are not passed
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :List[str] , lowerCamelCase_ :Any ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = TFEsmForMaskedLM(config=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": TFEsmModel,
            """fill-mask""": TFEsmForMaskedLM,
            """text-classification""": TFEsmForSequenceClassification,
            """token-classification""": TFEsmForTokenClassification,
            """zero-shot""": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_as_decoder(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip('''Protein models do not support embedding resizing.''')
    def test_resize_token_embeddings(self):
        '''simple docstring'''
        pass
    @unittest.skip('''Protein models do not support embedding resizing.''')
    def test_save_load_after_resize_token_embeddings(self):
        '''simple docstring'''
        pass
    def test_model_common_attributes(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    @slow
    def test_inference_masked_lm(self):
        '''simple docstring'''
        model = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))
    @slow
    def test_inference_no_head(self):
        '''simple docstring'''
        model = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''')
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
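# The two @slow checks above are skipped by default; in the transformers test
# setup they run once slow tests are enabled, e.g. (the test-file path below
# is illustrative):
#   RUN_SLOW=1 python -m pytest tests/models/esm/test_modeling_tf_esm.py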
| 18 |
"""simple docstring"""
def prime_sieve_eratosthenes(num: int) -> list[int]:
    '''simple docstring'''
    if num <= 0:
        raise ValueError('''Input must be a positive integer''')
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
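# Quick sanity check for the sieve above; the expected value follows directly
# from the definition of the algorithm:
#   prime_sieve_eratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]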
| 18 | 1 |
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
_DESCRIPTION = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    '''simple docstring'''
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
] , )
    def _get_feature_types(self):
'''simple docstring'''
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        '''simple docstring'''
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared)
        return {"mse": mse}
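# Tiny worked example for the wrapper above: with predictions [2.5, 0.0, 2, 8]
# and references [3, -0.5, 2, 7], the squared errors are [0.25, 0.25, 0, 1],
# so mse = 1.5 / 4 = 0.375 (matching the docstring example); squared=False
# instead returns the RMSE, sqrt(0.375) ~= 0.6124.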
| 18 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
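# Sketch of the effect of the _LazyModule wiring above (import path is
# illustrative): importing the package stays cheap, and a heavy submodule is
# only imported when one of its attributes is first accessed.
#   from transformers.models import funnel
#   model_cls = funnel.FunnelModel  # modeling_funnel is loaded only here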
| 18 | 1 |
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
lowerCamelCase__ : Optional[int] = None
try:
import msvcrt
except ImportError:
lowerCamelCase__ : Any = None
try:
import fcntl
except ImportError:
lowerCamelCase__ : List[Any] = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
lowerCamelCase__ : Optional[Any] = OSError
# Data
# ------------------------------------------------
__all__ = [
"Timeout",
"BaseFileLock",
"WindowsFileLock",
"UnixFileLock",
"SoftFileLock",
"FileLock",
]
__version__ = "3.0.12"
_logger = None
def logger():
    '''simple docstring'''
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    '''simple docstring'''
    def __init__(self, lock_file):
        '''simple docstring'''
        self.lock_file = lock_file
        return None
    def __str__(self):
        '''simple docstring'''
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy:
    '''simple docstring'''
    def __init__(self, lock):
        '''simple docstring'''
        self.lock = lock
        return None
    def __enter__(self):
        '''simple docstring'''
        return self.lock
    def __exit__(self, exc_type, exc_value, traceback):
        '''simple docstring'''
        self.lock.release()
        return None
class BaseFileLock:
    '''simple docstring'''
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        '''simple docstring'''
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file(self):
        '''simple docstring'''
        return self._lock_file
    @property
    def timeout(self):
        '''simple docstring'''
        return self._timeout
    @timeout.setter
    def timeout(self, value):
        '''simple docstring'''
        self._timeout = float(value)
        return None
    def _acquire(self):
        '''simple docstring'''
        raise NotImplementedError()
    def _release(self):
        '''simple docstring'''
        raise NotImplementedError()
    @property
    def is_locked(self):
        '''simple docstring'''
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        '''simple docstring'''
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()
                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...")
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        '''simple docstring'''
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file
                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None
def __enter__( self :Any ) -> Optional[int]:
'''simple docstring'''
self.acquire()
return self
    def __exit__(self, exc_type, exc_value, traceback):
'''simple docstring'''
self.release()
return None
    def __del__(self):
        '''simple docstring'''
        self.release(force=True)
        return None
    def hash_filename_if_too_long(self, path, max_length):
        '''simple docstring'''
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + '''...''' + hashed_filename + '''.lock'''
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    '''simple docstring'''
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        '''simple docstring'''
        from .file_utils import relative_to_absolute_path
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file)
    def _acquire(self):
        '''simple docstring'''
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None
    def _release(self):
        '''simple docstring'''
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)
        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    '''simple docstring'''
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        '''simple docstring'''
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
    def _acquire(self):
        '''simple docstring'''
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None
    def _release(self):
        '''simple docstring'''
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    '''simple docstring'''
    def _acquire(self):
        '''simple docstring'''
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None
    def _release(self):
        '''simple docstring'''
        os.close(self._lock_file_fd)
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
    if warnings is not None:
        warnings.warn("only soft file lock is available")
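# Minimal usage sketch for the classes above: FileLock resolves to the best
# backend available on this platform, and the context manager pairs acquire()
# with release() (the file name and timeout below are illustrative):
#   lock = FileLock("high_ground.txt.lock", timeout=1)
#   with lock:
#       with open("high_ground.txt", "a") as f:
#           f.write("hello")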
| 18 |
"""simple docstring"""
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    '''simple docstring'''
    return AutoConfig.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    '''simple docstring'''
    return AutoTokenizer.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    '''simple docstring'''
    return AutoModel.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    '''simple docstring'''
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    '''simple docstring'''
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    '''simple docstring'''
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    '''simple docstring'''
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
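# Illustrative torch.hub use of the entry points above (the repo path and
# checkpoint name are examples, not pinned by this file):
#   import torch
#   tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#   mdl = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")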
| 18 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = """trocr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """num_attention_heads""": """decoder_attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """decoder_layers""",
    }
    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
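# Quick illustration of the attribute_map aliases above: TrOCRConfig().hidden_size
# resolves to d_model (1024 by default) and num_attention_heads to
# decoder_attention_heads (16), so generic code reading config.hidden_size
# works unchanged with this decoder-only config.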
| 18 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = """encodec"""
    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        '''simple docstring'''
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}")
        super().__init__(**kwargs)
    @property
    def chunk_length(self):
        '''simple docstring'''
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)
    @property
    def chunk_stride(self):
        '''simple docstring'''
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))
    @property
    def frame_rate(self):
        '''simple docstring'''
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)
    @property
    def num_quantizers(self):
        '''simple docstring'''
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
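# Worked example for the derived properties above, using the defaults:
# hop_length = prod([8, 5, 4, 2]) = 320, so frame_rate = ceil(24000 / 320) = 75;
# with target_bandwidths[-1] = 24.0 kbps, num_quantizers = int(1000 * 24.0 // (75 * 10)) = 32.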
| 18 | 1 |
"""simple docstring"""
class CircularQueue:
    '''simple docstring'''
    def __init__(self, n: int):
        '''simple docstring'''
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0
    def __len__(self) -> int:
        '''simple docstring'''
        return self.size
    def is_empty(self) -> bool:
        '''simple docstring'''
        return self.size == 0
    def first(self):
        '''simple docstring'''
        return False if self.is_empty() else self.array[self.front]
    def enqueue(self, data):
        '''simple docstring'''
        if self.size >= self.n:
            raise Exception('''QUEUE IS FULL''')
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self
    def dequeue(self):
        '''simple docstring'''
        if self.size == 0:
            raise Exception('''UNDERFLOW''')
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
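# Minimal usage sketch for the circular queue above:
#   q = CircularQueue(3)
#   q.enqueue("a").enqueue("b")  # enqueue returns self, so calls chain
#   assert len(q) == 2 and q.first() == "a"
#   assert q.dequeue() == "a"    # frees the front slot for reuse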
| 18 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    '''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        '''simple docstring'''
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        '''simple docstring'''
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''pixel_values''': pixel_values,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        '''simple docstring'''
        return True
    def setUp(self):
        '''simple docstring'''
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        '''simple docstring'''
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device)
        return inputs_dict
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    '''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    @cached_property
    def default_image_processor(self):
        '''simple docstring'''
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
    @slow
    def test_inference_no_head(self):
        '''simple docstring'''
        model = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='''pt''').pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 18 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    '''simple docstring'''
    dataset_name: Optional[str] = field(
        default="""tab_fact""", metadata={"""help""": """The name of the dataset to use (via the datasets library)."""})
    dataset_config_name: Optional[str] = field(
        default="""tab_fact""",
        metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""})
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            """help""": (
                """Whether to pad all samples to `max_seq_length`. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch."""
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of prediction examples to this """
                """value if set."""
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"""help""": """A csv or a json file containing the training data."""})
    validation_file: Optional[str] = field(
        default=None, metadata={"""help""": """A csv or a json file containing the validation data."""})
    test_file: Optional[str] = field(default=None, metadata={"""help""": """A csv or a json file containing the test data."""})
    def __post_init__(self):
        '''simple docstring'''
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''')
        else:
            train_extension = self.train_file.split('''.''')[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split('''.''')[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    '''simple docstring'''
    model_name_or_path: str = field(
        default=None, metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""})
    config_name: Optional[str] = field(
        default=None, metadata={"""help""": """Pretrained config name or path if not the same as model_name"""})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""})
    cache_dir: Optional[str] = field(
        default=None, metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""},
    )
    model_revision: str = field(
        default="""main""", metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""},
    )
    use_auth_token: bool = field(
        default=False, metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        },
    )
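# Illustrative launch of this fine-tuning script (the script name, model and
# output path are examples, not fixed by the code above):
#   python run_tapex.py \
#       --model_name_or_path microsoft/tapex-base \
#       --dataset_name tab_fact \
#       --do_train --do_eval \
#       --output_dir /tmp/tapex_tabfact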
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('''.json'''):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the `statement` column for the input statement and the `table_text` column for the corresponding linearized table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
SCREAMING_SNAKE_CASE : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
SCREAMING_SNAKE_CASE : Any = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
SCREAMING_SNAKE_CASE : List[Any] = data_args.train_file.split('''.''' )[-1]
SCREAMING_SNAKE_CASE : Optional[int] = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
SCREAMING_SNAKE_CASE : str = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(F"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
SCREAMING_SNAKE_CASE : int = load_dataset('''csv''' , data_files=a_ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
SCREAMING_SNAKE_CASE : Tuple = load_dataset('''json''' , data_files=a_ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
SCREAMING_SNAKE_CASE : str = raw_datasets['''train'''].features['''label'''].names
SCREAMING_SNAKE_CASE : Union[str, Any] = len(a_ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=a_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
SCREAMING_SNAKE_CASE : Dict = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=a_ , )
SCREAMING_SNAKE_CASE : List[Any] = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=a_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
SCREAMING_SNAKE_CASE : Tuple = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
SCREAMING_SNAKE_CASE : Optional[Any] = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
SCREAMING_SNAKE_CASE : Tuple = {'''Refused''': 0, '''Entailed''': 1}
SCREAMING_SNAKE_CASE : List[Any] = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
SCREAMING_SNAKE_CASE : Optional[int] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(a_ : str ):
# Tokenize the texts
def _convert_table_text_to_pandas(a_ : List[Any] ):
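# rows in the linearized table are separated by '\n' and cells within a row by '#'; the first row supplies the column headers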
SCREAMING_SNAKE_CASE : List[Any] = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
SCREAMING_SNAKE_CASE : Dict = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
SCREAMING_SNAKE_CASE : List[Any] = examples['''statement''']
SCREAMING_SNAKE_CASE : Optional[int] = list(map(_convert_table_text_to_pandas , examples['''table_text'''] ) )
SCREAMING_SNAKE_CASE : Any = tokenizer(a_ , a_ , padding=a_ , max_length=a_ , truncation=a_ )
SCREAMING_SNAKE_CASE : List[Any] = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
SCREAMING_SNAKE_CASE : Optional[Any] = raw_datasets.map(
a_ , batched=a_ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on dataset''' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
SCREAMING_SNAKE_CASE : List[str] = raw_datasets['''train''']
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE : Any = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
SCREAMING_SNAKE_CASE : List[str] = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
SCREAMING_SNAKE_CASE : Tuple = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
SCREAMING_SNAKE_CASE : Optional[int] = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(a_ ) ) , 3 ):
logger.info(F"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary mapping metric names (strings) to floats.
def compute_metrics(p : EvalPrediction ):
preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
preds = np.argmax(preds , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
SCREAMING_SNAKE_CASE : Tuple = default_data_collator
elif training_args.fpaa:
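# pad to a multiple of 8 so fp16 tensor-core kernels stay efficient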
SCREAMING_SNAKE_CASE : Union[str, Any] = DataCollatorWithPadding(a_ , pad_to_multiple_of=8 )
else:
SCREAMING_SNAKE_CASE : List[Any] = None
# Initialize our Trainer
SCREAMING_SNAKE_CASE : Optional[Any] = Trainer(
model=a_ , args=a_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=a_ , tokenizer=a_ , data_collator=a_ , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE : List[str] = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE : str = last_checkpoint
SCREAMING_SNAKE_CASE : str = trainer.train(resume_from_checkpoint=a_ )
SCREAMING_SNAKE_CASE : Optional[int] = train_result.metrics
SCREAMING_SNAKE_CASE : int = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(a_ )
)
SCREAMING_SNAKE_CASE : Optional[int] = min(a_ , len(a_ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , a_ )
trainer.save_metrics('''train''' , a_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
SCREAMING_SNAKE_CASE : Tuple = trainer.evaluate(eval_dataset=a_ )
SCREAMING_SNAKE_CASE : str = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = min(a_ , len(a_ ) )
trainer.log_metrics('''eval''' , a_ )
trainer.save_metrics('''eval''' , a_ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Remove the `label` column because it contains -1 and the Trainer won't like that.
SCREAMING_SNAKE_CASE : Optional[Any] = predict_dataset.remove_columns('''label''' )
SCREAMING_SNAKE_CASE : Optional[Any] = trainer.predict(a_ , metric_key_prefix='''predict''' ).predictions
SCREAMING_SNAKE_CASE : Union[str, Any] = np.argmax(a_ , axis=1 )
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(a_ , '''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(a_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = label_list[item]
writer.write(F"{index}\t{item}\n" )
SCREAMING_SNAKE_CASE : Optional[int] = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**a_ )
else:
trainer.create_model_card(**a_ )
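# For xla_spawn (TPUs): the spawned entry point simply dispatches to main()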
def _mp_fn( a_ : int )-> None:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 18 | 1 |
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card( model_card_dir : Path , src_lang : str , tgt_lang : str , model_name : str )-> None:
'''simple docstring'''
texts = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
scores = {
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
SCREAMING_SNAKE_CASE : Any = F"{src_lang}-{tgt_lang}"
SCREAMING_SNAKE_CASE : Any = F"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=True , exist_ok=True )
path = os.path.join(model_card_dir , '''README.md''' )
print(F"Generating {path}" )
with open(path , '''w''' , encoding='''utf-8''' ) as f:
f.write(a_ )
# make sure we are under the root of the project
lowerCamelCase__ : Tuple = Path(__file__).resolve().parent.parent.parent
lowerCamelCase__ : Union[str, Any] = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
lowerCamelCase__ : str = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 18 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase__:
'''simple docstring'''
def __init__( self :str , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Any=13 , lowerCamelCase_ :Any=32 , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :Union[str, Any]=16 , lowerCamelCase_ :int=[1, 2, 1] , lowerCamelCase_ :str=[2, 2, 4] , lowerCamelCase_ :str=2 , lowerCamelCase_ :Tuple=2.0 , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :str=0.0 , lowerCamelCase_ :Optional[int]=0.0 , lowerCamelCase_ :Dict=0.1 , lowerCamelCase_ :Union[str, Any]="gelu" , lowerCamelCase_ :str=False , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :int=0.0_2 , lowerCamelCase_ :List[Any]=1E-5 , lowerCamelCase_ :int=True , lowerCamelCase_ :str=None , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Union[str, Any]=10 , lowerCamelCase_ :List[Any]=8 , ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : Any = embed_dim
SCREAMING_SNAKE_CASE : int = depths
SCREAMING_SNAKE_CASE : List[str] = num_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = window_size
SCREAMING_SNAKE_CASE : Optional[Any] = mlp_ratio
SCREAMING_SNAKE_CASE : List[Any] = qkv_bias
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = drop_path_rate
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = use_absolute_embeddings
SCREAMING_SNAKE_CASE : Any = patch_norm
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Any = is_training
SCREAMING_SNAKE_CASE : List[Any] = scope
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : Optional[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_stride
def __lowerCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self :int ) -> int:
'''simple docstring'''
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = SwinvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
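# each Swin stage merges 2x2 neighbouring patches, quartering the sequence length and doubling the channel dimension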
SCREAMING_SNAKE_CASE : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
SCREAMING_SNAKE_CASE : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __lowerCAmelCase ( self :str , lowerCamelCase_ :str , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = SwinvaForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE : Tuple = 1
SCREAMING_SNAKE_CASE : List[Any] = SwinvaForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :int ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = SwinvaForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
UpperCamelCase = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __lowerCAmelCase ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = SwinvaModelTester(self )
SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=lowerCamelCase_ , embed_dim=37 )
def __lowerCAmelCase ( self :Dict ) -> List[str]:
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def __lowerCAmelCase ( self :List[Any] ) -> Any:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[Any] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , nn.Linear ) )
def __lowerCAmelCase ( self :int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Optional[int] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.attentions
SCREAMING_SNAKE_CASE : Tuple = len(self.model_tester.depths )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : Union[str, Any] = True
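# window attention: every attention map covers window_size**2 tokens, so that is the expected attention dimension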
SCREAMING_SNAKE_CASE : Optional[int] = config.window_size**2
SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Dict = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
SCREAMING_SNAKE_CASE : Dict = len(lowerCamelCase_ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : int = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
if hasattr(self.model_tester , '''num_hidden_states_types''' ):
SCREAMING_SNAKE_CASE : Any = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
SCREAMING_SNAKE_CASE : Optional[Any] = 2
self.assertEqual(out_len + added_hidden_states , len(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :int , lowerCamelCase_ :Any , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE : str = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
# Swinv2 has a different seq_length
SCREAMING_SNAKE_CASE : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
SCREAMING_SNAKE_CASE : Any = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = reshaped_hidden_states[0].shape
SCREAMING_SNAKE_CASE : Optional[int] = (
reshaped_hidden_states[0].view(lowerCamelCase_ , lowerCamelCase_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : List[str] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[Any] = 3
SCREAMING_SNAKE_CASE : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
SCREAMING_SNAKE_CASE : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
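# the expected hidden-state shapes are computed at the padded resolution (height/width pushed past the next patch-size boundary)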
SCREAMING_SNAKE_CASE : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
SCREAMING_SNAKE_CASE : Optional[int] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , (padded_height, padded_width) )
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Tuple = SwinvaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self :Dict ) -> List[Any]:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE : List[str] = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**lowerCamelCase_ )
# verify the logits
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) )
| 18 | 1 |
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class lowercase__:
'''simple docstring'''
def __init__( self :Dict , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Optional[Any]=0.2 , lowerCamelCase_ :int=0.2 ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = bp_numa
SCREAMING_SNAKE_CASE : Optional[int] = bp_numa
SCREAMING_SNAKE_CASE : Dict = bp_numa
SCREAMING_SNAKE_CASE : List[str] = conva_get[:2]
SCREAMING_SNAKE_CASE : int = conva_get[2]
SCREAMING_SNAKE_CASE : List[Any] = size_pa
SCREAMING_SNAKE_CASE : Optional[Any] = rate_w
SCREAMING_SNAKE_CASE : Tuple = rate_t
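# kernels and fully-connected weights are initialised uniformly in (-0.5, 0.5); thresholds uniformly in (-1, 1)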
SCREAMING_SNAKE_CASE : List[str] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
SCREAMING_SNAKE_CASE : Optional[int] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE : List[str] = -2 * np.random.rand(self.conva[1] ) + 1
SCREAMING_SNAKE_CASE : Dict = -2 * np.random.rand(self.num_bpa ) + 1
SCREAMING_SNAKE_CASE : Optional[int] = -2 * np.random.rand(self.num_bpa ) + 1
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
with open(lowerCamelCase_ , '''wb''' ) as f:
pickle.dump(lowerCamelCase_ , lowerCamelCase_ )
print(f"Model saved: {save_path}" )
@classmethod
def __lowerCAmelCase ( cls :List[Any] , lowerCamelCase_ :Union[str, Any] ) -> Tuple:
'''simple docstring'''
with open(lowerCamelCase_ , '''rb''' ) as f:
SCREAMING_SNAKE_CASE : Any = pickle.load(lowerCamelCase_ ) # noqa: S301
SCREAMING_SNAKE_CASE : Optional[int] = model_dic.get('''conv1''' )
conv_get.append(model_dic.get('''step_conv1''' ) )
SCREAMING_SNAKE_CASE : Tuple = model_dic.get('''size_pooling1''' )
SCREAMING_SNAKE_CASE : Tuple = model_dic.get('''num_bp1''' )
SCREAMING_SNAKE_CASE : List[str] = model_dic.get('''num_bp2''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = model_dic.get('''num_bp3''' )
SCREAMING_SNAKE_CASE : List[Any] = model_dic.get('''rate_weight''' )
SCREAMING_SNAKE_CASE : Optional[Any] = model_dic.get('''rate_thre''' )
# create model instance
SCREAMING_SNAKE_CASE : Tuple = CNN(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# modify model parameter
SCREAMING_SNAKE_CASE : Optional[Any] = model_dic.get('''w_conv1''' )
SCREAMING_SNAKE_CASE : str = model_dic.get('''wkj''' )
SCREAMING_SNAKE_CASE : Optional[int] = model_dic.get('''vji''' )
SCREAMING_SNAKE_CASE : Tuple = model_dic.get('''thre_conv1''' )
SCREAMING_SNAKE_CASE : str = model_dic.get('''thre_bp2''' )
SCREAMING_SNAKE_CASE : int = model_dic.get('''thre_bp3''' )
return conv_ins
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
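# logistic sigmoid activation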
return 1 / (1 + np.exp(-1 * lowerCamelCase_ ))
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :Tuple ) -> List[Any]:
'''simple docstring'''
return round(lowerCamelCase_ , 3 )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :str , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[str] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = convs[0]
SCREAMING_SNAKE_CASE : List[str] = convs[1]
SCREAMING_SNAKE_CASE : Any = np.shape(lowerCamelCase_ )[0]
# get the data slice of original image data, data_focus
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for i_focus in range(0 , size_data - size_conv + 1 , lowerCamelCase_ ):
for j_focus in range(0 , size_data - size_conv + 1 , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Any = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(lowerCamelCase_ )
# calculate the feature map of every single kernel, and save it as a list of matrices
SCREAMING_SNAKE_CASE : int = []
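# each kernel produces one square feature map; its side length follows from the kernel size and stride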
SCREAMING_SNAKE_CASE : Tuple = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Dict = []
for i_focus in range(len(lowerCamelCase_ ) ):
SCREAMING_SNAKE_CASE : List[str] = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : str = np.asmatrix(lowerCamelCase_ ).reshape(
lowerCamelCase_ , lowerCamelCase_ )
data_featuremap.append(lowerCamelCase_ )
# expand the data slices to one dimension
SCREAMING_SNAKE_CASE : Tuple = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Dict = np.asarray(lowerCamelCase_ )
return focus_list, data_featuremap
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :Any , lowerCamelCase_ :int , lowerCamelCase_ :str="average_pool" ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = len(featuremaps[0] )
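# non-overlapping pooling windows shrink each feature-map side by a factor of size_pooling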
SCREAMING_SNAKE_CASE : List[str] = int(size_map / size_pooling )
SCREAMING_SNAKE_CASE : List[str] = []
for i_map in range(len(lowerCamelCase_ ) ):
SCREAMING_SNAKE_CASE : Any = featuremaps[i_map]
SCREAMING_SNAKE_CASE : int = []
for i_focus in range(0 , lowerCamelCase_ , lowerCamelCase_ ):
for j_focus in range(0 , lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(lowerCamelCase_ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : int = np.asmatrix(lowerCamelCase_ ).reshape(lowerCamelCase_ , lowerCamelCase_ )
featuremap_pooled.append(lowerCamelCase_ )
return featuremap_pooled
def __lowerCAmelCase ( self :int , lowerCamelCase_ :List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = []
for i in range(len(lowerCamelCase_ ) ):
SCREAMING_SNAKE_CASE : Tuple = np.shape(data[i] )
SCREAMING_SNAKE_CASE : Union[str, Any] = data[i].reshape(1 , shapes[0] * shapes[1] )
SCREAMING_SNAKE_CASE : List[str] = data_listed.getA().tolist()[0]
data_expanded.extend(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = np.asarray(lowerCamelCase_ )
return data_expanded
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Dict ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = np.asarray(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = np.shape(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :str , lowerCamelCase_ :Any , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = []
SCREAMING_SNAKE_CASE : Tuple = 0
for i_map in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : List[str] = np.ones((size_map, size_map) )
for i in range(0 , lowerCamelCase_ , lowerCamelCase_ ):
for j in range(0 , lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = pd_pool[
i_pool
]
SCREAMING_SNAKE_CASE : str = i_pool + 1
SCREAMING_SNAKE_CASE : Any = np.multiply(
lowerCamelCase_ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(lowerCamelCase_ )
return pd_all
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :Dict , lowerCamelCase_ :str , lowerCamelCase_ :Tuple , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] , lowerCamelCase_ :str=bool ) -> str:
'''simple docstring'''
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(lowerCamelCase_ )) )
print((''' - - Shape: Teach_Data ''', np.shape(lowerCamelCase_ )) )
SCREAMING_SNAKE_CASE : Any = 0
SCREAMING_SNAKE_CASE : List[str] = []
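# initialise the MSE above the error threshold so the training loop runs at least once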
SCREAMING_SNAKE_CASE : Optional[Any] = 1_00_00
while rp < n_repeat and mse >= error_accuracy:
SCREAMING_SNAKE_CASE : List[str] = 0
print(f"-------------Learning Time {rp}--------------" )
for p in range(len(lowerCamelCase_ ) ):
# print('------------Learning Image: %d--------------'%p)
SCREAMING_SNAKE_CASE : Dict = np.asmatrix(datas_train[p] )
SCREAMING_SNAKE_CASE : List[Any] = np.asarray(datas_teach[p] )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.convolute(
lowerCamelCase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE : str = self.pooling(lowerCamelCase_ , self.size_poolinga )
SCREAMING_SNAKE_CASE : str = np.shape(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = self._expand(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = data_bp_input
SCREAMING_SNAKE_CASE : Any = np.dot(lowerCamelCase_ , self.vji.T ) - self.thre_bpa
SCREAMING_SNAKE_CASE : Union[str, Any] = self.sig(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = np.dot(lowerCamelCase_ , self.wkj.T ) - self.thre_bpa
SCREAMING_SNAKE_CASE : Any = self.sig(lowerCamelCase_ )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
SCREAMING_SNAKE_CASE : int = np.multiply(
(data_teach - bp_outa) , np.multiply(lowerCamelCase_ , (1 - bp_outa) ) )
SCREAMING_SNAKE_CASE : Optional[int] = np.multiply(
np.dot(lowerCamelCase_ , self.wkj ) , np.multiply(lowerCamelCase_ , (1 - bp_outa) ) )
SCREAMING_SNAKE_CASE : Any = np.dot(lowerCamelCase_ , self.vji )
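# back-propagate through average pooling by spreading each pooled gradient evenly over its window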
SCREAMING_SNAKE_CASE : Any = pd_i_all / (self.size_poolinga * self.size_poolinga)
SCREAMING_SNAKE_CASE : Any = pd_conva_pooled.T.getA().tolist()
SCREAMING_SNAKE_CASE : int = self._calculate_gradient_from_pool(
lowerCamelCase_ , lowerCamelCase_ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = self._expand_mat(pd_conva_all[k_conv] )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.rate_weight * np.dot(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
SCREAMING_SNAKE_CASE : Optional[Any] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
SCREAMING_SNAKE_CASE : int = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE : int = self.vji + pd_j_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE : Optional[int] = self.thre_bpa - pd_k_all * self.rate_thre
SCREAMING_SNAKE_CASE : Any = self.thre_bpa - pd_j_all * self.rate_thre
# accumulate the summed absolute error over all training images
SCREAMING_SNAKE_CASE : int = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
SCREAMING_SNAKE_CASE : int = rp + 1
SCREAMING_SNAKE_CASE : str = error_count / patterns
all_mse.append(lowerCamelCase_ )
def draw_error():
SCREAMING_SNAKE_CASE : List[str] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(lowerCamelCase_ , '''+-''' )
plt.plot(lowerCamelCase_ , '''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(lowerCamelCase_ , alpha=0.5 )
plt.show()
print('''------------------Training Completed---------------------''' )
print((''' - - Training epoch: ''', rp, f" - - Mse: {mse:.6f}") )
if draw_e:
draw_error()
return mse
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :List[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(lowerCamelCase_ )) )
for p in range(len(lowerCamelCase_ ) ):
SCREAMING_SNAKE_CASE : List[str] = np.asmatrix(datas_test[p] )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.convolute(
lowerCamelCase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE : Any = self.pooling(lowerCamelCase_ , self.size_poolinga )
SCREAMING_SNAKE_CASE : Optional[int] = self._expand(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = data_bp_input
SCREAMING_SNAKE_CASE : Optional[Any] = bp_outa * self.vji.T - self.thre_bpa
SCREAMING_SNAKE_CASE : Dict = self.sig(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = bp_outa * self.wkj.T - self.thre_bpa
SCREAMING_SNAKE_CASE : Optional[Any] = self.sig(lowerCamelCase_ )
produce_out.extend(bp_outa.getA().tolist() )
SCREAMING_SNAKE_CASE : int = [list(map(self.do_round , lowerCamelCase_ ) ) for each in produce_out]
return np.asarray(lowerCamelCase_ )
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = np.asmatrix(lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = self.convolute(
lowerCamelCase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE : List[str] = self.pooling(lowerCamelCase_ , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 18 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """blenderbot-small"""
UpperCamelCase = ["""past_key_values"""]
UpperCamelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self :Any , lowerCamelCase_ :Dict=5_02_65 , lowerCamelCase_ :str=5_12 , lowerCamelCase_ :Tuple=8 , lowerCamelCase_ :int=20_48 , lowerCamelCase_ :str=16 , lowerCamelCase_ :Optional[int]=8 , lowerCamelCase_ :str=20_48 , lowerCamelCase_ :Optional[Any]=16 , lowerCamelCase_ :Union[str, Any]=0.0 , lowerCamelCase_ :List[str]=0.0 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :int="gelu" , lowerCamelCase_ :Tuple=5_12 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :int=0.0 , lowerCamelCase_ :Tuple=0.0 , lowerCamelCase_ :Optional[int]=0.0_2 , lowerCamelCase_ :Union[str, Any]=1 , lowerCamelCase_ :Dict=False , lowerCamelCase_ :Optional[int]=0 , lowerCamelCase_ :List[Any]=1 , lowerCamelCase_ :Any=2 , lowerCamelCase_ :Optional[Any]=2 , **lowerCamelCase_ :Dict , ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[Any] = d_model
SCREAMING_SNAKE_CASE : Dict = encoder_ffn_dim
SCREAMING_SNAKE_CASE : Tuple = encoder_layers
SCREAMING_SNAKE_CASE : Dict = encoder_attention_heads
SCREAMING_SNAKE_CASE : Any = decoder_ffn_dim
SCREAMING_SNAKE_CASE : str = decoder_layers
SCREAMING_SNAKE_CASE : str = decoder_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = dropout
SCREAMING_SNAKE_CASE : Optional[Any] = attention_dropout
SCREAMING_SNAKE_CASE : Any = activation_dropout
SCREAMING_SNAKE_CASE : List[str] = activation_function
SCREAMING_SNAKE_CASE : Optional[int] = init_std
SCREAMING_SNAKE_CASE : List[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_layerdrop
SCREAMING_SNAKE_CASE : List[Any] = use_cache
SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_layers
SCREAMING_SNAKE_CASE : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , forced_eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self :Any ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch'''}
SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase_ , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self.num_layers
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : int = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE : List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE : Any = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def __lowerCAmelCase ( self :List[str] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Any = super().outputs
else:
SCREAMING_SNAKE_CASE : Tuple = super(lowerCamelCase_ , self ).outputs
if self.use_past:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_layers
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE : str = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def __lowerCAmelCase ( self :int , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Generate decoder inputs
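# when past key/values are reused, the decoder only receives the single most recent token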
SCREAMING_SNAKE_CASE : Optional[int] = seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE : str = dict(**lowerCamelCase_ , **lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = common_inputs['''input_ids'''].shape
SCREAMING_SNAKE_CASE : str = common_inputs['''decoder_input_ids'''].shape[1]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.num_attention_heads
SCREAMING_SNAKE_CASE : str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Optional[Any] = decoder_seq_length + 3
SCREAMING_SNAKE_CASE : int = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
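# extend the decoder attention mask so it also covers the dummy past key/value positions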
SCREAMING_SNAKE_CASE : List[Any] = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(lowerCamelCase_ , lowerCamelCase_ )] , dim=1 )
SCREAMING_SNAKE_CASE : Optional[int] = []
# If the numbers of encoder and decoder layers are present in the model configuration, both are considered
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.num_layers
SCREAMING_SNAKE_CASE : int = min(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = max(lowerCamelCase_ , lowerCamelCase_ ) - min_num_layers
SCREAMING_SNAKE_CASE : Tuple = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(lowerCamelCase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE : int = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(lowerCamelCase_ , lowerCamelCase_ ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) )
return common_inputs
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE : List[str] = seqlen + 2
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = self.num_layers
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self.num_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Tuple = common_inputs['''attention_mask'''].dtype
SCREAMING_SNAKE_CASE : Any = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(lowerCamelCase_ , lowerCamelCase_ , dtype=lowerCamelCase_ )] , dim=1 )
SCREAMING_SNAKE_CASE : Optional[int] = [
(torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) for _ in range(lowerCamelCase_ )
]
return common_inputs
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If the axis is dynamic (-1), we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : int = tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Tuple = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE : Any = dict(tokenizer(lowerCamelCase_ , return_tensors=lowerCamelCase_ ) )
return common_inputs
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
elif self.task == "causal-lm":
SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
return common_inputs
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :int , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Dict ) -> List[Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Optional[Any] = super()._flatten_past_key_values_(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Tuple = super(lowerCamelCase_ , self )._flatten_past_key_values_(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
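    # ------------------------------------------------------------------
    # Hedged usage sketch (comments only; the concrete config class name is an
    # assumption, since this fragment omits the class header). For a BART-style
    # seq2seq ONNX config, dummy export inputs would be built roughly as:
    #
    #     tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
    #     dummy = onnx_config.generate_dummy_inputs(
    #         tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
    #     )
    #
    # With use_past=True and task="seq2seq-lm", dummy["past_key_values"] then
    # holds the 4-tuples (and padding 2-tuples) constructed above.
    # ------------------------------------------------------------------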
| 18 | 1 |
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = (DPMSolverSDEScheduler,)
UpperCamelCase = 10
def __lowerCAmelCase ( self :int , **lowerCamelCase_ :List[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = {
'''num_train_timesteps''': 11_00,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**lowerCamelCase_ )
return config
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def __lowerCAmelCase ( self :Any ) -> Union[str, Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=lowerCamelCase_ , beta_end=lowerCamelCase_ )
def __lowerCAmelCase ( self :Dict ) -> Optional[Any]:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase_ )
def __lowerCAmelCase ( self :List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : List[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE : Tuple = self.dummy_model()
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE : Tuple = sample.to(lowerCamelCase_ )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE : str = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = output.prev_sample
SCREAMING_SNAKE_CASE : int = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : List[str] = torch.mean(torch.abs(lowerCamelCase_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_7_8_2_1_0_4_4_9_2_1_8_7_5 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_7_8_7_0_5_9_6_4_5_6_5_2_7_7 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_2_1_1_1_8_1_6_4_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_6_8_9_2_2_9_9_6_5_2 ) < 1E-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3
def __lowerCAmelCase ( self :List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : int = self.get_scheduler_config(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE : List[str] = self.dummy_model()
SCREAMING_SNAKE_CASE : str = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE : Tuple = sample.to(lowerCamelCase_ )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE : List[str] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = output.prev_sample
SCREAMING_SNAKE_CASE : Tuple = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.mean(torch.abs(lowerCamelCase_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_2_4.7_7_1_4_9_2_0_0_4_3_9_4_5_3 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_2_2_6_2_8_9_0_1_4_8_1_6_2_8_4 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_2_8.1_6_6_3_3_6_0_5_9_5_7_0_3 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_6_8_8_3_2_6_0_0_1_1_6_7_2_9_7 ) < 1E-3
else:
assert abs(result_sum.item() - 1_1_9.8_4_8_7_5_4_8_8_2_8_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.1_5_6_0_5_3_0_6_6_2_5_3_6_6_2_1 ) < 1E-3
def __lowerCAmelCase ( self :List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = self.dummy_model()
SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter.to(lowerCamelCase_ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE : List[str] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = output.prev_sample
SCREAMING_SNAKE_CASE : int = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : int = torch.mean(torch.abs(lowerCamelCase_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_6_9_5_7_3_9_7_4_6_0_9_3_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_8_0_5_9_3_4_6_0_7_9_8_2_6_3_5 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_3_6_3_7_6_9_5_3_1_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_8_3_8_2_4_1_5_7_7_1 ) < 1E-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3
def __lowerCAmelCase ( self :List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Dict = scheduler_class(**lowerCamelCase_ , use_karras_sigmas=lowerCamelCase_ )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = self.dummy_model()
SCREAMING_SNAKE_CASE : Any = self.dummy_sample_deter.to(lowerCamelCase_ ) * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE : Tuple = sample.to(lowerCamelCase_ )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = output.prev_sample
SCREAMING_SNAKE_CASE : Tuple = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Any = torch.mean(torch.abs(lowerCamelCase_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_7_6.6_6_9_7_4_1_3_5_7_4_2_1_8_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_7.6_3_6_5_3_5_6_4_4_5_3_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
else:
assert abs(result_sum.item() - 1_7_0.3_1_3_5_2_2_3_3_8_8_6_7_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
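def _dpm_sde_demo_loop():
    """Hedged sketch, not part of the test suite above: a minimal standalone
    denoising loop using only the scheduler APIs exercised by these tests.
    The zero noise prediction is a stand-in for a real UNet, so the output is
    meaningless; this only illustrates the call sequence."""
    scheduler = DPMSolverSDEScheduler(num_train_timesteps=11_00, noise_sampler_seed=0)
    scheduler.set_timesteps(10)
    # start from scaled Gaussian noise, as the tests do via init_noise_sigma
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # stand-in for model(model_input, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample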
| 18 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCamelCase__ : Dict = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """layoutlmv3"""
def __init__( self :str , lowerCamelCase_ :Optional[Any]=5_02_65 , lowerCamelCase_ :Dict=7_68 , lowerCamelCase_ :Union[str, Any]=12 , lowerCamelCase_ :Optional[Any]=12 , lowerCamelCase_ :Union[str, Any]=30_72 , lowerCamelCase_ :Any="gelu" , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Any=5_12 , lowerCamelCase_ :int=2 , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :Optional[int]=1E-5 , lowerCamelCase_ :Dict=1 , lowerCamelCase_ :int=0 , lowerCamelCase_ :Tuple=2 , lowerCamelCase_ :List[str]=10_24 , lowerCamelCase_ :Tuple=1_28 , lowerCamelCase_ :Any=1_28 , lowerCamelCase_ :Optional[Any]=True , lowerCamelCase_ :str=32 , lowerCamelCase_ :int=1_28 , lowerCamelCase_ :int=64 , lowerCamelCase_ :List[Any]=2_56 , lowerCamelCase_ :Any=True , lowerCamelCase_ :str=True , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :List[str]=2_24 , lowerCamelCase_ :Dict=3 , lowerCamelCase_ :Union[str, Any]=16 , lowerCamelCase_ :Any=None , **lowerCamelCase_ :Optional[Any] , ) -> int:
'''simple docstring'''
super().__init__(
vocab_size=lowerCamelCase_ , hidden_size=lowerCamelCase_ , num_hidden_layers=lowerCamelCase_ , num_attention_heads=lowerCamelCase_ , intermediate_size=lowerCamelCase_ , hidden_act=lowerCamelCase_ , hidden_dropout_prob=lowerCamelCase_ , attention_probs_dropout_prob=lowerCamelCase_ , max_position_embeddings=lowerCamelCase_ , type_vocab_size=lowerCamelCase_ , initializer_range=lowerCamelCase_ , layer_norm_eps=lowerCamelCase_ , pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Optional[Any] = max_ad_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = coordinate_size
SCREAMING_SNAKE_CASE : Tuple = shape_size
SCREAMING_SNAKE_CASE : Optional[int] = has_relative_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_pos_bins
SCREAMING_SNAKE_CASE : int = max_rel_pos
SCREAMING_SNAKE_CASE : Any = has_spatial_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_ad_pos_bins
SCREAMING_SNAKE_CASE : Dict = max_rel_ad_pos
SCREAMING_SNAKE_CASE : Optional[int] = text_embed
SCREAMING_SNAKE_CASE : Any = visual_embed
SCREAMING_SNAKE_CASE : Any = input_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : List[str] = patch_size
SCREAMING_SNAKE_CASE : str = classifier_dropout
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = version.parse("""1.12""" )
@property
def __lowerCAmelCase ( self :List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def __lowerCAmelCase ( self :Optional[int] ) -> float:
'''simple docstring'''
return 1E-5
@property
def __lowerCAmelCase ( self :Tuple ) -> int:
'''simple docstring'''
return 12
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :"ProcessorMixin" , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional["TensorType"] = None , lowerCamelCase_ :int = 3 , lowerCamelCase_ :int = 40 , lowerCamelCase_ :int = 40 , ) -> Mapping[str, Any]:
'''simple docstring'''
setattr(processor.image_processor , '''apply_ocr''' , lowerCamelCase_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Dict = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Union[str, Any] = processor.tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Union[str, Any] = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
SCREAMING_SNAKE_CASE : int = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_images(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = dict(
processor(
lowerCamelCase_ , text=lowerCamelCase_ , boxes=lowerCamelCase_ , return_tensors=lowerCamelCase_ , ) )
return inputs
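    # Hedged note (sketch): compute_effective_axis_dimension resolves dynamic
    # (-1) axes so that ONNX tracing cannot specialize on a single shape. A
    # dynamic axis falls back to the fixed default, then the tokenizer's
    # special-token count is subtracted, e.g. with the defaults used above:
    #     compute_effective_axis_dimension(-1, fixed_dimension=2, num_token_to_add=0)  # 2 samples
    #     compute_effective_axis_dimension(-1, fixed_dimension=8, num_token_to_add=2)  # 6 body tokens
    # so that the dummy text plus special tokens hits the target length of 8.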
| 18 | 1 |
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class lowercase__( _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = BertJapaneseTokenizer
UpperCamelCase = False
UpperCamelCase = True
def __lowerCAmelCase ( self :Any ) -> str:
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : int = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = '''こんにちは、世界。 \nこんばんは、世界。'''
SCREAMING_SNAKE_CASE : List[Any] = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
return input_text, output_text
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.get_input_output_texts(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.decode(lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_ )
return text, ids
def __lowerCAmelCase ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
pass # TODO add if relevant
def __lowerCAmelCase ( self :Tuple ) -> str:
'''simple docstring'''
pass # TODO add if relevant
def __lowerCAmelCase ( self :Dict ) -> Any:
'''simple docstring'''
pass # TODO add if relevant
def __lowerCAmelCase ( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE : Dict = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' )
self.assertListEqual(lowerCamelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def __lowerCAmelCase ( self :Optional[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' )
self.assertIsNotNone(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = '''こんにちは、世界。\nこんばんは、世界。'''
SCREAMING_SNAKE_CASE : Dict = tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
SCREAMING_SNAKE_CASE : int = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(lowerCamelCase_ , '''wb''' ) as handle:
pickle.dump(lowerCamelCase_ , lowerCamelCase_ )
with open(lowerCamelCase_ , '''rb''' ) as handle:
SCREAMING_SNAKE_CASE : Dict = pickle.load(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = tokenizer_new.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def __lowerCAmelCase ( self :Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = MecabTokenizer(mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __lowerCAmelCase ( self :List[str] ) -> Tuple:
'''simple docstring'''
try:
SCREAMING_SNAKE_CASE : Any = MecabTokenizer(mecab_dic='''unidic_lite''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __lowerCAmelCase ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
try:
SCREAMING_SNAKE_CASE : Tuple = MecabTokenizer(mecab_dic='''unidic''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __lowerCAmelCase ( self :Optional[int] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = MecabTokenizer(do_lower_case=lowerCamelCase_ , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __lowerCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
try:
SCREAMING_SNAKE_CASE : str = MecabTokenizer(
do_lower_case=lowerCamelCase_ , normalize_text=lowerCamelCase_ , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
def __lowerCAmelCase ( self :int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = MecabTokenizer(normalize_text=lowerCamelCase_ , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , )
@require_sudachi
def __lowerCAmelCase ( self :Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' )
self.assertIsNotNone(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = '''こんにちは、世界。\nこんばんは、世界。'''
SCREAMING_SNAKE_CASE : Dict = tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
SCREAMING_SNAKE_CASE : Tuple = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(lowerCamelCase_ , '''wb''' ) as handle:
pickle.dump(lowerCamelCase_ , lowerCamelCase_ )
with open(lowerCamelCase_ , '''rb''' ) as handle:
SCREAMING_SNAKE_CASE : int = pickle.load(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = tokenizer_new.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
@require_sudachi
def __lowerCAmelCase ( self :List[str] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = SudachiTokenizer(sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __lowerCAmelCase ( self :Dict ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] )
@require_sudachi
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] )
@require_sudachi
def __lowerCAmelCase ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] )
@require_sudachi
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = SudachiTokenizer(do_lower_case=lowerCamelCase_ , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __lowerCAmelCase ( self :str ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = SudachiTokenizer(normalize_text=lowerCamelCase_ , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __lowerCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = SudachiTokenizer(trim_whitespace=lowerCamelCase_ , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
@require_jumanpp
def __lowerCAmelCase ( self :Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' )
self.assertIsNotNone(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = '''こんにちは、世界。\nこんばんは、世界。'''
SCREAMING_SNAKE_CASE : Tuple = tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
SCREAMING_SNAKE_CASE : Tuple = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(lowerCamelCase_ , '''wb''' ) as handle:
pickle.dump(lowerCamelCase_ , lowerCamelCase_ )
with open(lowerCamelCase_ , '''rb''' ) as handle:
SCREAMING_SNAKE_CASE : Any = pickle.load(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = tokenizer_new.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
@require_jumanpp
def __lowerCAmelCase ( self :Dict ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __lowerCAmelCase ( self :Any ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = JumanppTokenizer(do_lower_case=lowerCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __lowerCAmelCase ( self :Tuple ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = JumanppTokenizer(normalize_text=lowerCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __lowerCAmelCase ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = JumanppTokenizer(trim_whitespace=lowerCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , )
@require_jumanpp
def __lowerCAmelCase ( self :Any ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , )
def __lowerCAmelCase ( self :int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
SCREAMING_SNAKE_CASE : Tuple = {}
for i, token in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Any = i
SCREAMING_SNAKE_CASE : Any = WordpieceTokenizer(vocab=lowerCamelCase_ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] )
def __lowerCAmelCase ( self :Optional[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.subword_tokenizer
SCREAMING_SNAKE_CASE : List[str] = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' )
self.assertListEqual(lowerCamelCase_ , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] )
SCREAMING_SNAKE_CASE : Optional[int] = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
self.assertListEqual(lowerCamelCase_ , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )
def __lowerCAmelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' )
SCREAMING_SNAKE_CASE : Any = tokenizer.encode('''ありがとう。''' , add_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode('''どういたしまして。''' , add_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ , lowerCamelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowercase__( _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = BertJapaneseTokenizer
UpperCamelCase = False
def __lowerCAmelCase ( self :List[str] ) -> Dict:
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : str = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
SCREAMING_SNAKE_CASE : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowerCAmelCase ( self :Tuple , **lowerCamelCase_ :List[str] ) -> Tuple:
'''simple docstring'''
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **lowerCamelCase_ )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Tuple ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = '''こんにちは、世界。 \nこんばんは、世界。'''
SCREAMING_SNAKE_CASE : int = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
return input_text, output_text
def __lowerCAmelCase ( self :Any ) -> Tuple:
'''simple docstring'''
pass # TODO add if relevant
def __lowerCAmelCase ( self :int ) -> Any:
'''simple docstring'''
pass # TODO add if relevant
def __lowerCAmelCase ( self :Optional[Any] ) -> List[str]:
'''simple docstring'''
pass # TODO add if relevant
def __lowerCAmelCase ( self :Any ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' )
SCREAMING_SNAKE_CASE : str = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' )
self.assertListEqual(
lowerCamelCase_ , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def __lowerCAmelCase ( self :List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
SCREAMING_SNAKE_CASE : int = {}
for i, token in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Optional[int] = i
SCREAMING_SNAKE_CASE : Dict = CharacterTokenizer(vocab=lowerCamelCase_ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] )
self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] )
def __lowerCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' )
SCREAMING_SNAKE_CASE : int = tokenizer.encode('''ありがとう。''' , add_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = tokenizer.encode('''どういたしまして。''' , add_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ , lowerCamelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowercase__( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = '''cl-tohoku/bert-base-japanese'''
SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
class lowercase__( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = '''cl-tohoku/bert-base-japanese'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertTokenizer.from_pretrained(lowerCamelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
SCREAMING_SNAKE_CASE : List[Any] = '''bert-base-cased'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertJapaneseTokenizer.from_pretrained(lowerCamelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
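# ---------------------------------------------------------------------------
# Hedged sketch (illustration only, not the HF implementation): the greedy
# longest-match-first lookup that WordpieceTokenizer performs per word. A word
# with no valid segmentation collapses to a single unk token, matching the
# expectations asserted in the tests above.
# ---------------------------------------------------------------------------
def _wordpiece_sketch(word, vocab, unk_token="[UNK]"):
    pieces, start = [], 0
    while start < len(word):
        end, piece = len(word), None
        while start < end:
            candidate = word[start:end] if start == 0 else "##" + word[start:end]
            if candidate in vocab:
                piece = candidate
                break
            end -= 1
        if piece is None:
            return [unk_token]  # the whole word becomes a single unk token
        pieces.append(piece)
        start = end
    return pieces
# e.g. _wordpiece_sketch("こんばんは", {"こん", "##ばんは"}) == ["こん", "##ばんは"]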
| 18 |
"""simple docstring"""
import math
def __A ( a_ : list , a_ : int )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = len(a_ )
SCREAMING_SNAKE_CASE : Optional[Any] = int(math.floor(math.sqrt(a_ ) ) )
SCREAMING_SNAKE_CASE : List[str] = 0
while arr[min(a_ , a_ ) - 1] < x:
SCREAMING_SNAKE_CASE : Optional[Any] = step
step += int(math.floor(math.sqrt(a_ ) ) )
if prev >= n:
return -1
while arr[prev] < x:
SCREAMING_SNAKE_CASE : Any = prev + 1
if prev == min(a_ , a_ ):
return -1
if arr[prev] == x:
return prev
return -1
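# Complexity note (sketch): jump search probes every floor(sqrt(n))-th element
# and then scans linearly inside a single block, so it performs O(sqrt(n))
# comparisons on a sorted array, sitting between linear and binary search.
# Example: jump_search([0, 1, 3, 5, 8, 13, 21], 8) returns 4.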
if __name__ == "__main__":
lowerCamelCase__ : Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
lowerCamelCase__ : List[str] = [int(item) for item in user_input.split(",")]
lowerCamelCase__ : Dict = int(input("Enter the number to be searched:\n"))
lowerCamelCase__ : Tuple = jump_search(arr, x)
if res == -1:
print("Number not found!")
else:
print(f'''Number {x} is at index {res}''')
| 18 | 1 |
"""simple docstring"""
from collections import defaultdict
def __A ( a_ : str , a_ : str )-> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = first_str.lower().strip()
SCREAMING_SNAKE_CASE : int = second_str.lower().strip()
# Remove whitespace
SCREAMING_SNAKE_CASE : Optional[int] = first_str.replace(''' ''' , '''''' )
SCREAMING_SNAKE_CASE : str = second_str.replace(''' ''' , '''''' )
# Strings of different lengths are not anagrams
if len(a_ ) != len(a_ ):
return False
# Default values for count should be 0
SCREAMING_SNAKE_CASE : defaultdict[str, int] = defaultdict(a_ )
    # For each character position in the input strings, increment the count
    # for the first string's character and decrement it for the second's
for i in range(len(a_ ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
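# Equivalent formulation (sketch): after the same lowercasing and space
# stripping, collections.Counter gives the same answer in one comparison:
#     Counter(first_str) == Counter(second_str)
# The defaultdict version above avoids building two separate counters.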
if __name__ == "__main__":
from doctest import testmod
testmod()
lowerCamelCase__ : Tuple = input("Enter the first string ").strip()
lowerCamelCase__ : List[str] = input("Enter the second string ").strip()
lowerCamelCase__ : Optional[int] = check_anagrams(input_a, input_b)
print(f'''{input_a} and {input_b} are {"" if status else "not "}anagrams.''')
| 18 |
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
lowerCamelCase__ : List[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
lowerCamelCase__ : str = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights. Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
lowerCamelCase__ : int = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__( datasets.Metric ):
'''simple docstring'''
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :int=None , lowerCamelCase_ :str=1 , lowerCamelCase_ :Union[str, Any]="binary" , lowerCamelCase_ :Dict=None ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = fa_score(
lowerCamelCase_ , lowerCamelCase_ , labels=lowerCamelCase_ , pos_label=lowerCamelCase_ , average=lowerCamelCase_ , sample_weight=lowerCamelCase_ )
return {"f1": float(lowerCamelCase_ ) if score.size == 1 else score}
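# Worked example (sketch) matching the docstring above: for
# references=[0, 1, 0, 1, 0] and predictions=[0, 0, 1, 1, 0] with pos_label=1,
# TP = 1, FP = 1 and FN = 1, so precision = recall = 0.5 and
# F1 = 2 * (0.5 * 0.5) / (0.5 + 0.5) = 0.5, i.e. {'f1': 0.5}.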
| 18 | 1 |
"""simple docstring"""
def __A ( a_ : int )-> list[int]:
'''simple docstring'''
if num <= 0:
raise ValueError('''Input must be a positive integer''' )
SCREAMING_SNAKE_CASE : Optional[int] = [True] * (num + 1)
SCREAMING_SNAKE_CASE : Optional[Any] = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , a_ ):
SCREAMING_SNAKE_CASE : Any = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
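# Worked trace (sketch) for num = 10: p = 2 marks 4, 6, 8 and 10 as composite,
# p = 3 marks 9, and the loop stops once p * p = 16 > 10, leaving the primes
# [2, 3, 5, 7] as the return value.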
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ : str = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 18 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def __A ( a_ : int , a_ : int )-> bool:
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def __A ( a_ : int )-> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : List[str] = 11
SCREAMING_SNAKE_CASE : Union[str, Any] = int('''1''' + '''0''' * digit_len )
for num in range(a_ , a_ ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(a_ , a_ ):
solutions.append(F"{num}/{den}" )
den += 1
num += 1
SCREAMING_SNAKE_CASE : Optional[Any] = 10
return solutions
def __A ( a_ : int = 2 )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 1.0
for fraction in fraction_list(a_ ):
SCREAMING_SNAKE_CASE : List[str] = Fraction(a_ )
result *= frac.denominator / frac.numerator
return int(a_ )
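# Worked check (sketch): 49/98 "cancels" the shared 9 to give 4/8 = 1/2, and
# indeed 49/98 == 1/2, so is_digit_cancelling(49, 98) holds. The four
# non-trivial two-digit cases are 16/64, 19/95, 26/65 and 49/98; their product
# is 1/100, so solution() multiplies the den/num ratios and returns 100.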
if __name__ == "__main__":
print(solution())
| 18 | 1 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase__:
'''simple docstring'''
def __init__( self :str , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Any=13 , lowerCamelCase_ :Any=32 , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :Union[str, Any]=16 , lowerCamelCase_ :int=[1, 2, 1] , lowerCamelCase_ :str=[2, 2, 4] , lowerCamelCase_ :str=2 , lowerCamelCase_ :Tuple=2.0 , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :str=0.0 , lowerCamelCase_ :Optional[int]=0.0 , lowerCamelCase_ :Dict=0.1 , lowerCamelCase_ :Union[str, Any]="gelu" , lowerCamelCase_ :str=False , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :int=0.0_2 , lowerCamelCase_ :List[Any]=1E-5 , lowerCamelCase_ :int=True , lowerCamelCase_ :str=None , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Union[str, Any]=10 , lowerCamelCase_ :List[Any]=8 , ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : Any = embed_dim
SCREAMING_SNAKE_CASE : int = depths
SCREAMING_SNAKE_CASE : List[str] = num_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = window_size
SCREAMING_SNAKE_CASE : Optional[Any] = mlp_ratio
SCREAMING_SNAKE_CASE : List[Any] = qkv_bias
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = drop_path_rate
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = use_absolute_embeddings
SCREAMING_SNAKE_CASE : Any = patch_norm
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Any = is_training
SCREAMING_SNAKE_CASE : List[Any] = scope
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : Optional[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_stride
def __lowerCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self :int ) -> int:
'''simple docstring'''
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = SwinvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
SCREAMING_SNAKE_CASE : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
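        # Shape note (sketch, using the tester defaults above): image_size=32 and
        # patch_size=2 give (32 // 2) ** 2 = 256 patch tokens; the divisor
        # 4 ** (len(depths) - 1) = 16 accounts for the two patch-merging stages,
        # so expected_seq_len is 256 // 16 = 16 and expected_dim is
        # embed_dim * 2 ** 2 = 64.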
def __lowerCAmelCase ( self :str , lowerCamelCase_ :str , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = SwinvaForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE : Tuple = 1
SCREAMING_SNAKE_CASE : List[Any] = SwinvaForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :int ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = SwinvaForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
UpperCamelCase = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __lowerCAmelCase ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = SwinvaModelTester(self )
SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=lowerCamelCase_ , embed_dim=37 )
def __lowerCAmelCase ( self :Dict ) -> List[str]:
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def __lowerCAmelCase ( self :List[Any] ) -> Any:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[Any] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , nn.Linear ) )
def __lowerCAmelCase ( self :int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Optional[int] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.attentions
SCREAMING_SNAKE_CASE : Tuple = len(self.model_tester.depths )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Optional[int] = config.window_size**2
SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Dict = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
SCREAMING_SNAKE_CASE : Dict = len(lowerCamelCase_ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : int = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
if hasattr(self.model_tester , '''num_hidden_states_types''' ):
SCREAMING_SNAKE_CASE : Any = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
SCREAMING_SNAKE_CASE : Optional[Any] = 2
self.assertEqual(out_len + added_hidden_states , len(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
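        # Note (sketch): windowed attention restricts each map to
        # window_size ** 2 = 4 tokens with the tester defaults, so the first
        # layer's attention tensors checked here are (num_heads[0], 4, 4)
        # regardless of the input resolution.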
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :int , lowerCamelCase_ :Any , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE : str = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
# Swinv2 has a different seq_length
SCREAMING_SNAKE_CASE : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
SCREAMING_SNAKE_CASE : Any = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = reshaped_hidden_states[0].shape
SCREAMING_SNAKE_CASE : Optional[int] = (
reshaped_hidden_states[0].view(lowerCamelCase_ , lowerCamelCase_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : List[str] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[Any] = 3
SCREAMING_SNAKE_CASE : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
SCREAMING_SNAKE_CASE : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
SCREAMING_SNAKE_CASE : Optional[int] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , (padded_height, padded_width) )
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Tuple = SwinvaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self :Dict ) -> List[Any]:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE : List[str] = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**lowerCamelCase_ )
# verify the logits
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) )
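# Patch-count arithmetic used by the hidden-state checks above, as a small
# self-contained sketch (helper name is ours): a 256x256 input with patch
# size 4 yields (256 // 4) * (256 // 4) = 4096 patch tokens, while each
# windowed self-attention map covers window_size**2 positions per head.
def sketch_num_patches(image_size: int, patch_size: int) -> int:
    return (image_size // patch_size) * (image_size // patch_size)

assert sketch_num_patches(256, 4) == 4096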
| 18 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
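# A quick self-check of the defaults above (guarded so library imports stay
# side-effect free): four stages double the embedding dim three times, so
# hidden_size is 96 * 2**3 = 768.
if __name__ == "__main__":
    config = MaskFormerSwinConfig()
    assert config.hidden_size == 768
    assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]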
| 18 | 1 |
"""simple docstring"""
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle ``data`` in place by repeatedly swapping two random positions."""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
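# Note: the function above performs len(data) random pair swaps, which is not
# the textbook Fisher-Yates algorithm and does not produce perfectly uniform
# permutations. A sketch of the classic (Durstenfeld) variant for comparison;
# the helper name is ours:
def fisher_yates_shuffle_uniform(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # choose only from the not-yet-fixed prefix
        data[i], data[j] = data[j], data[i]
    return data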
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
print("Fisher-Yates Shuffle:")
print("List", integers, strings)
print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 18 |
"""simple docstring"""
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
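    # show_min() returns the distance rather than printing it, so the two calls
    # above discard their results. Surfacing them (distances worked out by hand):
    print(graph.show_min(1, 4))  # 11, via 1 -> 3 -> 4 (5 + 6)
    print(graph.show_min(0, 3))  # 16, via 0 -> 2 -> 3 (9 + 7)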
| 18 | 1 |
"""simple docstring"""
def topological_sort(graph: dict) -> None:
    """Kahn's algorithm: print a topological order of ``graph``, or report a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
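# `queue.pop(0)` above is O(n) on a plain list, so the sort degrades toward
# O(V^2); collections.deque restores the expected O(V + E). A minimal variant
# sketch (names are ours):
from collections import deque


def topological_sort_deque(graph: dict) -> list:
    indegree = {v: 0 for v in graph}
    for targets in graph.values():
        for t in targets:
            indegree[t] += 1
    queue = deque(v for v, d in indegree.items() if d == 0)
    order = []
    while queue:
        vertex = queue.popleft()  # O(1), unlike list.pop(0)
        order.append(vertex)
        for t in graph[vertex]:
            indegree[t] -= 1
            if indegree[t] == 0:
                queue.append(t)
    return order if len(order) == len(graph) else []  # [] signals a cycle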
| 18 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
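# The try/except guard above means importing this package never fails outright;
# torch-backed classes are only registered when torch is installed. A minimal
# standalone sketch of the same optional-backend pattern (helper name is ours):
def sketch_optional_import(module_name: str):
    import importlib
    try:
        return importlib.import_module(module_name)
    except ImportError:
        return None  # caller degrades gracefully when the backend is missing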
| 18 | 1 |
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
lowerCamelCase__ : Optional[Any] = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __A ( a_ : Optional[int] )-> Dict:
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def __A ( a_ : List[Any] , a_ : Optional[int] , a_ : Optional[int] )-> Dict:
'''simple docstring'''
return max(metric_fn(a_ , a_ ) for gt in ground_truths )
def __A ( a_ : List[Any] , a_ : Union[str, Any] , a_ : str )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Optional[Any] = []
if args.gold_data_mode == "qa":
SCREAMING_SNAKE_CASE : List[Any] = pd.read_csv(a_ , sep='''\t''' , header=a_ )
for answer_list in data[1]:
SCREAMING_SNAKE_CASE : str = ast.literal_eval(a_ )
answers.append(a_ )
else:
SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = [[reference] for reference in references]
SCREAMING_SNAKE_CASE : Dict = 0
for prediction, ground_truths in zip(a_ , a_ ):
total += 1
em += metric_max_over_ground_truths(a_ , a_ , a_ )
fa += metric_max_over_ground_truths(a_ , a_ , a_ )
SCREAMING_SNAKE_CASE : Any = 100.0 * em / total
SCREAMING_SNAKE_CASE : Optional[int] = 100.0 * fa / total
logger.info(F"F1: {fa:.2f}" )
logger.info(F"EM: {em:.2f}" )
def __A ( a_ : Any , a_ : Any , a_ : List[Any] )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = args.k
SCREAMING_SNAKE_CASE : Tuple = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Union[str, Any] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = 0
for hypo, reference in zip(a_ , a_ ):
SCREAMING_SNAKE_CASE : Optional[int] = set(hypo.split('''\t''' )[:k] )
SCREAMING_SNAKE_CASE : List[str] = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
SCREAMING_SNAKE_CASE : Dict = 100.0 * em / total
logger.info(F"Precision@{k}: {em: .2f}" )
def __A ( a_ : Any , a_ : List[str] , a_ : str )-> int:
'''simple docstring'''
def strip_title(a_ : Optional[Any] ):
if title.startswith('''"''' ):
SCREAMING_SNAKE_CASE : Tuple = title[1:]
if title.endswith('''"''' ):
SCREAMING_SNAKE_CASE : Any = title[:-1]
return title
SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ , )['''input_ids'''].to(args.device )
SCREAMING_SNAKE_CASE : Any = rag_model.rag.question_encoder(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = question_enc_outputs[0]
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever(
a_ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE : Any = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
SCREAMING_SNAKE_CASE : Dict = []
for docs in all_docs:
SCREAMING_SNAKE_CASE : List[Any] = [strip_title(a_ ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(a_ ) )
return provenance_strings
def __A ( a_ : List[Any] , a_ : int , a_ : str )-> Tuple:
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ )
SCREAMING_SNAKE_CASE : Dict = inputs_dict.input_ids.to(args.device )
SCREAMING_SNAKE_CASE : Any = inputs_dict.attention_mask.to(args.device )
SCREAMING_SNAKE_CASE : Tuple = rag_model.generate( # rag_model overwrites generate
a_ , attention_mask=a_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=a_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.generator_tokenizer.batch_decode(a_ , skip_special_tokens=a_ )
if args.print_predictions:
for q, a in zip(a_ , a_ ):
logger.info('''Q: {} - A: {}'''.format(a_ , a_ ) )
return answers
def __A ( )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=a_ , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=a_ , choices=['''exact''', '''compressed''', '''legacy'''] , type=a_ , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=a_ , type=a_ , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=a_ , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=a_ , type=a_ , required=a_ , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=a_ , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=a_ , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=a_ , type=a_ , required=a_ , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=a_ , type=a_ , required=a_ , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=a_ , choices=['''qa''', '''ans'''] , help=(
'''Format of the gold data file'''
'''qa - a single line in the following format: question [tab] answer_list'''
'''ans - a single line of the gold file contains the expected answer string'''
) , )
parser.add_argument(
'''--predictions_path''' , type=a_ , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
'''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=a_ , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=a_ , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=a_ , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=a_ , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
'''--print_docs''' , action='''store_true''' , help='''If True, prints docs retried while generating.''' , )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
SCREAMING_SNAKE_CASE : Dict = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def __A ( a_ : Optional[Any] )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = {}
if args.model_type is None:
SCREAMING_SNAKE_CASE : List[str] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : List[str] = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs
if args.index_name is not None:
SCREAMING_SNAKE_CASE : Tuple = args.index_name
if args.index_path is not None:
SCREAMING_SNAKE_CASE : List[Any] = args.index_path
else:
SCREAMING_SNAKE_CASE : str = BartForConditionalGeneration
SCREAMING_SNAKE_CASE : Optional[int] = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , a_ )
SCREAMING_SNAKE_CASE : int = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
SCREAMING_SNAKE_CASE : str = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(a_ , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(a_ ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : Dict = RagRetriever.from_pretrained(a_ , **a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class.from_pretrained(a_ , retriever=a_ , **a_ )
model.retriever.init_retrieval()
else:
SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(a_ , **a_ )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
SCREAMING_SNAKE_CASE : Dict = []
for line in tqdm(a_ ):
questions.append(line.strip() )
if len(a_ ) == args.eval_batch_size:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) + '''\n''' )
preds_file.flush()
SCREAMING_SNAKE_CASE : Union[str, Any] = []
if len(a_ ) > 0:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) )
preds_file.flush()
score_fn(a_ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
    args = get_args()
main(args)
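# Example invocation (the model id is a real hub checkpoint; data paths are
# placeholders for your own files):
#   python eval_rag.py \
#     --model_name_or_path facebook/rag-token-nq \
#     --model_type rag_token \
#     --evaluation_set path/to/test.source \
#     --gold_data_path path/to/gold.csv \
#     --gold_data_mode qa \
#     --predictions_path predictions.txt \
#     --eval_mode e2e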
| 18 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self :List[str] , lowerCamelCase_ :Union[List[ControlNetModel], Tuple[ControlNetModel]] ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[Any] = nn.ModuleList(lowerCamelCase_ )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :Union[torch.Tensor, float, int] , lowerCamelCase_ :torch.Tensor , lowerCamelCase_ :List[torch.tensor] , lowerCamelCase_ :List[float] , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[Dict[str, Any]] = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :bool = True , ) -> Union[ControlNetOutput, Tuple]:
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(lowerCamelCase_ , lowerCamelCase_ , self.nets ) ):
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = controlnet(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
# merge samples
if i == 0:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = down_samples, mid_sample
else:
SCREAMING_SNAKE_CASE : Optional[int] = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowerCamelCase_ , lowerCamelCase_ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Union[str, os.PathLike] , lowerCamelCase_ :bool = True , lowerCamelCase_ :Callable = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[str] = None , ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : Any = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowerCamelCase_ , is_main_process=lowerCamelCase_ , save_function=lowerCamelCase_ , safe_serialization=lowerCamelCase_ , variant=lowerCamelCase_ , )
idx += 1
SCREAMING_SNAKE_CASE : Union[str, Any] = model_path_to_save + f"_{idx}"
@classmethod
def __lowerCAmelCase ( cls :Dict , lowerCamelCase_ :Optional[Union[str, os.PathLike]] , **lowerCamelCase_ :Tuple ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : Optional[int] = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
SCREAMING_SNAKE_CASE : Dict = pretrained_model_path
while os.path.isdir(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Tuple = ControlNetModel.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
controlnets.append(lowerCamelCase_ )
idx += 1
SCREAMING_SNAKE_CASE : Union[str, Any] = pretrained_model_path + f"_{idx}"
logger.info(f"{len(lowerCamelCase_ )} controlnets loaded from {pretrained_model_path}." )
if len(lowerCamelCase_ ) == 0:
raise ValueError(
f"No ControlNets found under {os.path.dirname(lowerCamelCase_ )}. Expected at least {pretrained_model_path + '_0'}." )
return cls(lowerCamelCase_ )
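# In the diffusers source this class is MultiControlNetModel. A hedged usage
# sketch with two real hub checkpoints (left commented out because it
# downloads weights when run):
#
#   from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
#   nets = [
#       ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny"),
#       ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth"),
#   ]
#   pipe = StableDiffusionControlNetPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", controlnet=nets
#   )
#
# Passing a list makes the pipeline wrap the nets in this class; each entry of
# the `image` and `controlnet_conditioning_scale` call arguments then maps to
# one net in `self.nets`, exactly as the forward pass above iterates them.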
| 18 | 1 |
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class lowercase__( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :int ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(lowerCamelCase_ ) )
def __lowerCAmelCase ( self :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(lowerCamelCase_ ) )
def __lowerCAmelCase ( self :Dict ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(lowerCamelCase_ ) )
def __lowerCAmelCase ( self :List[str] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(lowerCamelCase_ ) )
def __lowerCAmelCase ( self :Any ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(lowerCamelCase_ ) )
def __lowerCAmelCase ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
SCREAMING_SNAKE_CASE : Union[str, Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCamelCase_ , variant=lowerCamelCase_ ) )
def __lowerCAmelCase ( self :str ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
SCREAMING_SNAKE_CASE : Dict = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCamelCase_ , variant=lowerCamelCase_ ) )
def __lowerCAmelCase ( self :int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
SCREAMING_SNAKE_CASE : Union[str, Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCamelCase_ , variant=lowerCamelCase_ ) )
def __lowerCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
SCREAMING_SNAKE_CASE : Dict = '''fp16'''
self.assertFalse(is_safetensors_compatible(lowerCamelCase_ , variant=lowerCamelCase_ ) )
def __lowerCAmelCase ( self :Optional[int] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
SCREAMING_SNAKE_CASE : Optional[Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCamelCase_ , variant=lowerCamelCase_ ) )
def __lowerCAmelCase ( self :Dict ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
SCREAMING_SNAKE_CASE : str = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCamelCase_ , variant=lowerCamelCase_ ) )
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
SCREAMING_SNAKE_CASE : Optional[Any] = '''fp16'''
self.assertFalse(is_safetensors_compatible(lowerCamelCase_ , variant=lowerCamelCase_ ) )
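# The rule these tests encode, written out as a hedged standalone sketch (this
# is not the actual diffusers implementation, just the behaviour the cases
# above pin down): every component directory shipping a PyTorch ".bin" weight
# for the requested variant must also ship a ".safetensors" file for it.
import os

def sketch_is_safetensors_compatible(filenames, variant=None) -> bool:
    bin_ext = f".{variant}.bin" if variant else ".bin"
    sf_ext = f".{variant}.safetensors" if variant else ".safetensors"
    bin_dirs = {os.path.dirname(f) for f in filenames if f.endswith(bin_ext)}
    return all(
        any(os.path.dirname(f) == d and f.endswith(sf_ext) for f in filenames)
        for d in bin_dirs
    )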
| 18 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force of the given magnitude and angle into [x, y] components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    """A planar force system is in static equilibrium when its net moment is (nearly) zero."""
    moments: NDArray[float64] = cross(location, forces)
    sum_of_moments: float = sum(moments)
    return abs(sum_of_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
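    # Quick sanity check of polar_force (degree input by default): a 10 N force
    # at 0 deg is purely x, at 90 deg purely y, up to floating-point error.
    from numpy import allclose

    assert allclose(polar_force(10, 0), [10.0, 0.0])
    assert allclose(polar_force(10, 90), [0.0, 10.0])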
| 18 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowerCamelCase__ : int = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = ["""input_features""", """attention_mask"""]
def __init__( self :Optional[Any] , lowerCamelCase_ :int=80 , lowerCamelCase_ :Tuple=1_60_00 , lowerCamelCase_ :int=80 , lowerCamelCase_ :int=0.0 , lowerCamelCase_ :Any=True , lowerCamelCase_ :int=True , lowerCamelCase_ :List[Any]=True , **lowerCamelCase_ :Optional[Any] , ) -> Tuple:
'''simple docstring'''
super().__init__(feature_size=lowerCamelCase_ , sampling_rate=lowerCamelCase_ , padding_value=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = num_mel_bins
SCREAMING_SNAKE_CASE : Union[str, Any] = do_ceptral_normalize
SCREAMING_SNAKE_CASE : List[Any] = normalize_means
SCREAMING_SNAKE_CASE : Union[str, Any] = normalize_vars
SCREAMING_SNAKE_CASE : int = True
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :np.ndarray , ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
SCREAMING_SNAKE_CASE : str = torch.from_numpy(lowerCamelCase_ ).unsqueeze(0 )
SCREAMING_SNAKE_CASE : List[Any] = ta_kaldi.fbank(lowerCamelCase_ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def __lowerCAmelCase ( lowerCamelCase_ :np.ndarray , lowerCamelCase_ :int , lowerCamelCase_ :Optional[bool] = True , lowerCamelCase_ :Optional[bool] = True , lowerCamelCase_ :float = 0.0 , ) -> np.ndarray:
'''simple docstring'''
if normalize_means:
SCREAMING_SNAKE_CASE : Union[str, Any] = x[:input_length].mean(axis=0 )
SCREAMING_SNAKE_CASE : Optional[int] = np.subtract(lowerCamelCase_ , lowerCamelCase_ )
if normalize_vars:
SCREAMING_SNAKE_CASE : Dict = x[:input_length].std(axis=0 )
SCREAMING_SNAKE_CASE : Tuple = np.divide(lowerCamelCase_ , lowerCamelCase_ )
if input_length < x.shape[0]:
SCREAMING_SNAKE_CASE : Tuple = padding_value
# make sure array is in float32
SCREAMING_SNAKE_CASE : int = x.astype(np.floataa )
return x
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :List[np.ndarray] , lowerCamelCase_ :Optional[np.ndarray] = None ) -> List[np.ndarray]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(lowerCamelCase_ , lowerCamelCase_ , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(lowerCamelCase_ , lowerCamelCase_ )
]
def __call__( self :int , lowerCamelCase_ :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCamelCase_ :Union[bool, str, PaddingStrategy] = False , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :Optional[Union[str, TensorType]] = None , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :Optional[bool] = None , **lowerCamelCase_ :str , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
SCREAMING_SNAKE_CASE : Optional[int] = isinstance(lowerCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
SCREAMING_SNAKE_CASE : Any = is_batched_numpy or (
isinstance(lowerCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(lowerCamelCase_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase_ , np.ndarray ):
SCREAMING_SNAKE_CASE : int = np.asarray(lowerCamelCase_ , dtype=np.floataa )
elif isinstance(lowerCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : List[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : Any = [raw_speech]
# extract fbank features
SCREAMING_SNAKE_CASE : str = [self._extract_fbank_features(lowerCamelCase_ ) for waveform in raw_speech]
# convert into correct format for padding
SCREAMING_SNAKE_CASE : Any = BatchFeature({'''input_features''': features} )
SCREAMING_SNAKE_CASE : int = self.pad(
lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , **lowerCamelCase_ , )
# make sure list is in array format
SCREAMING_SNAKE_CASE : List[Any] = padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Optional[Any] = [np.asarray(lowerCamelCase_ , dtype=np.floataa ) for feature in input_features]
SCREAMING_SNAKE_CASE : List[str] = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
SCREAMING_SNAKE_CASE : List[Any] = [np.asarray(lowerCamelCase_ , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
SCREAMING_SNAKE_CASE : Dict = (
np.array(lowerCamelCase_ , dtype=np.intaa )
if self._get_padding_strategies(lowerCamelCase_ , max_length=lowerCamelCase_ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
SCREAMING_SNAKE_CASE : str = self.normalize(
padded_inputs['''input_features'''] , attention_mask=lowerCamelCase_ )
if return_tensors is not None:
SCREAMING_SNAKE_CASE : List[Any] = padded_inputs.convert_to_tensors(lowerCamelCase_ )
return padded_inputs
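# In the transformers source this class is Speech2TextFeatureExtractor. A
# hedged usage sketch (real hub id; synthetic one-second 16 kHz waveform),
# left commented out because it fetches the extractor config from the hub:
#
#   from transformers import Speech2TextFeatureExtractor
#   import numpy as np
#   extractor = Speech2TextFeatureExtractor.from_pretrained("facebook/s2t-small-librispeech-asr")
#   inputs = extractor(np.zeros(16_000, dtype=np.float32), sampling_rate=16_000, return_tensors="pt")
#   inputs.input_features.shape  # (1, frames, 80): normalized log-mel filter-bank features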
| 18 |
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score ``item`` by the number of positions where it matches ``main_target``."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """With probability MUTATION_PROBABILITY, replace one random gene of ``child``."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)


def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str]) -> list[str]:
    """Breed mutated children from ``parent_1``, proportionally to its fitness score."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve random strings toward ``target``; return (generation, total population, best string)."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
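    # evaluate() simply counts positional character matches, so "abc" scores
    # 2.0 against "abd" (positions 0 and 1 agree). A quick self-check:
    assert evaluate("abc", "abd") == ("abc", 2.0)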
| 18 | 1 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self :List[str] , lowerCamelCase_ :Union[List[ControlNetModel], Tuple[ControlNetModel]] ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[Any] = nn.ModuleList(lowerCamelCase_ )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :Union[torch.Tensor, float, int] , lowerCamelCase_ :torch.Tensor , lowerCamelCase_ :List[torch.tensor] , lowerCamelCase_ :List[float] , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[Dict[str, Any]] = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :bool = True , ) -> Union[ControlNetOutput, Tuple]:
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(lowerCamelCase_ , lowerCamelCase_ , self.nets ) ):
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = controlnet(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
# merge samples
if i == 0:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = down_samples, mid_sample
else:
SCREAMING_SNAKE_CASE : Optional[int] = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowerCamelCase_ , lowerCamelCase_ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Union[str, os.PathLike] , lowerCamelCase_ :bool = True , lowerCamelCase_ :Callable = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[str] = None , ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : Any = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowerCamelCase_ , is_main_process=lowerCamelCase_ , save_function=lowerCamelCase_ , safe_serialization=lowerCamelCase_ , variant=lowerCamelCase_ , )
idx += 1
SCREAMING_SNAKE_CASE : Union[str, Any] = model_path_to_save + f"_{idx}"
@classmethod
def __lowerCAmelCase ( cls :Dict , lowerCamelCase_ :Optional[Union[str, os.PathLike]] , **lowerCamelCase_ :Tuple ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : Optional[int] = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
SCREAMING_SNAKE_CASE : Dict = pretrained_model_path
while os.path.isdir(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Tuple = ControlNetModel.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
controlnets.append(lowerCamelCase_ )
idx += 1
SCREAMING_SNAKE_CASE : Union[str, Any] = pretrained_model_path + f"_{idx}"
logger.info(f"{len(lowerCamelCase_ )} controlnets loaded from {pretrained_model_path}." )
if len(lowerCamelCase_ ) == 0:
raise ValueError(
f"No ControlNets found under {os.path.dirname(lowerCamelCase_ )}. Expected at least {pretrained_model_path + '_0'}." )
return cls(lowerCamelCase_ )
| 18 |
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
lowerCamelCase__ : Optional[Any] = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __A ( a_ : Optional[int] )-> Dict:
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def __A ( a_ : List[Any] , a_ : Optional[int] , a_ : Optional[int] )-> Dict:
'''simple docstring'''
return max(metric_fn(a_ , a_ ) for gt in ground_truths )
def __A ( a_ : List[Any] , a_ : Union[str, Any] , a_ : str )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Optional[Any] = []
if args.gold_data_mode == "qa":
SCREAMING_SNAKE_CASE : List[Any] = pd.read_csv(a_ , sep='''\t''' , header=a_ )
for answer_list in data[1]:
SCREAMING_SNAKE_CASE : str = ast.literal_eval(a_ )
answers.append(a_ )
else:
SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = [[reference] for reference in references]
SCREAMING_SNAKE_CASE : Dict = 0
for prediction, ground_truths in zip(a_ , a_ ):
total += 1
em += metric_max_over_ground_truths(a_ , a_ , a_ )
fa += metric_max_over_ground_truths(a_ , a_ , a_ )
SCREAMING_SNAKE_CASE : Any = 100.0 * em / total
SCREAMING_SNAKE_CASE : Optional[int] = 100.0 * fa / total
logger.info(F"F1: {fa:.2f}" )
logger.info(F"EM: {em:.2f}" )
def __A ( a_ : Any , a_ : Any , a_ : List[Any] )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = args.k
SCREAMING_SNAKE_CASE : Tuple = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Union[str, Any] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = 0
for hypo, reference in zip(a_ , a_ ):
SCREAMING_SNAKE_CASE : Optional[int] = set(hypo.split('''\t''' )[:k] )
SCREAMING_SNAKE_CASE : List[str] = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
SCREAMING_SNAKE_CASE : Dict = 100.0 * em / total
logger.info(F"Precision@{k}: {em: .2f}" )
def __A ( a_ : Any , a_ : List[str] , a_ : str )-> int:
'''simple docstring'''
def strip_title(a_ : Optional[Any] ):
if title.startswith('''"''' ):
SCREAMING_SNAKE_CASE : Tuple = title[1:]
if title.endswith('''"''' ):
SCREAMING_SNAKE_CASE : Any = title[:-1]
return title
SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ , )['''input_ids'''].to(args.device )
SCREAMING_SNAKE_CASE : Any = rag_model.rag.question_encoder(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = question_enc_outputs[0]
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever(
a_ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE : Any = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
SCREAMING_SNAKE_CASE : Dict = []
for docs in all_docs:
SCREAMING_SNAKE_CASE : List[Any] = [strip_title(a_ ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(a_ ) )
return provenance_strings
def __A ( a_ : List[Any] , a_ : int , a_ : str )-> Tuple:
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ )
SCREAMING_SNAKE_CASE : Dict = inputs_dict.input_ids.to(args.device )
SCREAMING_SNAKE_CASE : Any = inputs_dict.attention_mask.to(args.device )
SCREAMING_SNAKE_CASE : Tuple = rag_model.generate( # rag_model overwrites generate
a_ , attention_mask=a_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=a_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.generator_tokenizer.batch_decode(a_ , skip_special_tokens=a_ )
if args.print_predictions:
for q, a in zip(a_ , a_ ):
logger.info('''Q: {} - A: {}'''.format(a_ , a_ ) )
return answers
def __A ( )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=a_ , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=a_ , choices=['''exact''', '''compressed''', '''legacy'''] , type=a_ , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=a_ , type=a_ , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=a_ , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=a_ , type=a_ , required=a_ , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=a_ , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=a_ , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=a_ , type=a_ , required=a_ , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=a_ , type=a_ , required=a_ , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=a_ , choices=['''qa''', '''ans'''] , help=(
'''Format of the gold data file'''
'''qa - a single line in the following format: question [tab] answer_list'''
'''ans - a single line of the gold file contains the expected answer string'''
) , )
parser.add_argument(
'''--predictions_path''' , type=a_ , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
'''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=a_ , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=a_ , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=a_ , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=a_ , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
'''--print_docs''' , action='''store_true''' , help='''If True, prints docs retried while generating.''' , )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
SCREAMING_SNAKE_CASE : Dict = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def __A ( a_ : Optional[Any] )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = {}
if args.model_type is None:
SCREAMING_SNAKE_CASE : List[str] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : List[str] = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs
if args.index_name is not None:
SCREAMING_SNAKE_CASE : Tuple = args.index_name
if args.index_path is not None:
SCREAMING_SNAKE_CASE : List[Any] = args.index_path
else:
SCREAMING_SNAKE_CASE : str = BartForConditionalGeneration
SCREAMING_SNAKE_CASE : Optional[int] = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , a_ )
SCREAMING_SNAKE_CASE : int = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
SCREAMING_SNAKE_CASE : str = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
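    # (e2e: exact match / F1 on the generated answers; retrieval: precision@k of the retrieved docs)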
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(a_ , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(a_ ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : Dict = RagRetriever.from_pretrained(a_ , **a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class.from_pretrained(a_ , retriever=a_ , **a_ )
model.retriever.init_retrieval()
else:
SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(a_ , **a_ )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
SCREAMING_SNAKE_CASE : Dict = []
for line in tqdm(a_ ):
questions.append(line.strip() )
if len(a_ ) == args.eval_batch_size:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) + '''\n''' )
preds_file.flush()
SCREAMING_SNAKE_CASE : Union[str, Any] = []
if len(a_ ) > 0:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) )
preds_file.flush()
score_fn(a_ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
lowerCamelCase__ : List[str] = get_args()
main(args)
| 18 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Any = logging.get_logger(__name__)
lowerCamelCase__ : List[Any] = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """falcon"""
UpperCamelCase = ["""past_key_values"""]
def __init__( self :int , lowerCamelCase_ :Dict=6_50_24 , lowerCamelCase_ :Optional[Any]=45_44 , lowerCamelCase_ :str=32 , lowerCamelCase_ :Union[str, Any]=71 , lowerCamelCase_ :Optional[Any]=1E-5 , lowerCamelCase_ :List[str]=0.0_2 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :int=0.0 , lowerCamelCase_ :str=0.0 , lowerCamelCase_ :List[str]=None , lowerCamelCase_ :Optional[int]=False , lowerCamelCase_ :Union[str, Any]=False , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Dict=True , lowerCamelCase_ :str=False , lowerCamelCase_ :List[Any]=11 , lowerCamelCase_ :int=11 , **lowerCamelCase_ :int , ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
# Backward compatibility with n_embed kwarg
SCREAMING_SNAKE_CASE : Optional[Any] = kwargs.pop('''n_embed''' , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = hidden_size if n_embed is None else n_embed
SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = layer_norm_epsilon
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Optional[Any] = use_cache
SCREAMING_SNAKE_CASE : int = hidden_dropout
SCREAMING_SNAKE_CASE : Dict = attention_dropout
SCREAMING_SNAKE_CASE : Any = bos_token_id
SCREAMING_SNAKE_CASE : Any = eos_token_id
SCREAMING_SNAKE_CASE : Any = num_attention_heads if num_kv_heads is None else num_kv_heads
SCREAMING_SNAKE_CASE : List[str] = alibi
SCREAMING_SNAKE_CASE : List[str] = new_decoder_architecture
SCREAMING_SNAKE_CASE : Optional[Any] = multi_query # Ignored when new_decoder_architecture is True
SCREAMING_SNAKE_CASE : Any = parallel_attn
SCREAMING_SNAKE_CASE : List[Any] = bias
super().__init__(bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
@property
def __lowerCAmelCase ( self :Tuple ) -> Dict:
'''simple docstring'''
return self.hidden_size // self.num_attention_heads
@property
def __lowerCAmelCase ( self :List[Any] ) -> Any:
'''simple docstring'''
return not self.alibi
| 18 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {"vocab_file": "vocab.json"}
lowerCamelCase__ : Dict = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
lowerCamelCase__ : Optional[Any] = {"mgp-str": 27}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[int]="[GO]" , lowerCamelCase_ :int="[GO]" , lowerCamelCase_ :str="[s]" , lowerCamelCase_ :Dict="[GO]" , **lowerCamelCase_ :List[str] ) -> Tuple:
'''simple docstring'''
super().__init__(
unk_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , **lowerCamelCase_ , )
with open(lowerCamelCase_ , encoding='''utf-8''' ) as vocab_handle:
SCREAMING_SNAKE_CASE : int = json.load(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in self.vocab.items()}
@property
def __lowerCAmelCase ( self :int ) -> Dict:
'''simple docstring'''
return len(self.vocab )
def __lowerCAmelCase ( self :List[str] ) -> Dict:
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Optional[Any] ) -> List[str]:
'''simple docstring'''
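        # Character-level tokenization: every character of the input string becomes its own vocab token.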
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for s in text:
char_tokens.extend(lowerCamelCase_ )
return char_tokens
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Tuple ) -> Optional[int]:
'''simple docstring'''
return self.vocab.get(lowerCamelCase_ , self.vocab.get(self.unk_token ) )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Dict ) -> Optional[int]:
'''simple docstring'''
return self.decoder.get(lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(lowerCamelCase_ ) )
return
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + '''\n''' )
return (vocab_file,)
| 18 | 1 |
"""simple docstring"""
lowerCamelCase__ : List[Any] = 256
# Modulus to hash a string
lowerCamelCase__ : Union[str, Any] = 1000003
def __A ( a_ : str , a_ : str )-> bool:
'''simple docstring'''
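    # Rabin-Karp substring search: keep a rolling hash of each length-p window of
    # `text` and compare it against the hash of `pattern`; hash matches are then
    # confirmed character by character to rule out collisions. Average-case O(p + t).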
SCREAMING_SNAKE_CASE : List[Any] = len(a_ )
SCREAMING_SNAKE_CASE : List[Any] = len(a_ )
if p_len > t_len:
return False
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = 1
# Calculating the hash of pattern and substring of text
for i in range(a_ ):
SCREAMING_SNAKE_CASE : Optional[int] = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
SCREAMING_SNAKE_CASE : Dict = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
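        # After this loop, modulus_power == alphabet_size ** (p_len - 1) % modulus,
        # the weight of a window's leading character in the rolling hash.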
SCREAMING_SNAKE_CASE : int = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
SCREAMING_SNAKE_CASE : Tuple = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def __A ( )-> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = '''abc1abc12'''
SCREAMING_SNAKE_CASE : str = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
SCREAMING_SNAKE_CASE : List[Any] = '''alskfjaldsk23adsfabcabc'''
assert rabin_karp(a_ , a_ ) and not rabin_karp(a_ , a_ )
# Test 2)
SCREAMING_SNAKE_CASE : List[str] = '''ABABX'''
SCREAMING_SNAKE_CASE : Optional[int] = '''ABABZABABYABABX'''
assert rabin_karp(a_ , a_ )
# Test 3)
SCREAMING_SNAKE_CASE : List[Any] = '''AAAB'''
SCREAMING_SNAKE_CASE : int = '''ABAAAAAB'''
assert rabin_karp(a_ , a_ )
# Test 4)
SCREAMING_SNAKE_CASE : Tuple = '''abcdabcy'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''abcxabcdabxabcdabcdabcy'''
assert rabin_karp(a_ , a_ )
# Test 5)
SCREAMING_SNAKE_CASE : List[Any] = '''Lü'''
SCREAMING_SNAKE_CASE : List[str] = '''Lüsai'''
assert rabin_karp(a_ , a_ )
SCREAMING_SNAKE_CASE : Optional[int] = '''Lue'''
assert not rabin_karp(a_ , a_ )
print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
| 18 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """luke"""
def __init__( self :Optional[int] , lowerCamelCase_ :Union[str, Any]=5_02_67 , lowerCamelCase_ :int=50_00_00 , lowerCamelCase_ :Tuple=7_68 , lowerCamelCase_ :List[str]=2_56 , lowerCamelCase_ :Dict=12 , lowerCamelCase_ :Optional[int]=12 , lowerCamelCase_ :Optional[Any]=30_72 , lowerCamelCase_ :List[Any]="gelu" , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :List[str]=5_12 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Tuple=0.0_2 , lowerCamelCase_ :Optional[int]=1E-12 , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :List[str]=None , lowerCamelCase_ :str=1 , lowerCamelCase_ :Any=0 , lowerCamelCase_ :str=2 , **lowerCamelCase_ :List[Any] , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
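        # LUKE extends a RoBERTa-style encoder with a separate entity vocabulary
        # and entity embeddings of dimension `entity_emb_size`.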
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : List[str] = entity_vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = entity_emb_size
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[int] = use_entity_aware_attention
SCREAMING_SNAKE_CASE : List[str] = classifier_dropout
| 18 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
lowerCamelCase__ : Dict = {
"susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
"susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """ernie_m"""
UpperCamelCase = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self :Dict , lowerCamelCase_ :int = 25_00_02 , lowerCamelCase_ :int = 7_68 , lowerCamelCase_ :int = 12 , lowerCamelCase_ :int = 12 , lowerCamelCase_ :int = 30_72 , lowerCamelCase_ :str = "gelu" , lowerCamelCase_ :float = 0.1 , lowerCamelCase_ :float = 0.1 , lowerCamelCase_ :int = 5_14 , lowerCamelCase_ :float = 0.0_2 , lowerCamelCase_ :int = 1 , lowerCamelCase_ :float = 1E-05 , lowerCamelCase_ :Any=None , lowerCamelCase_ :Any=False , lowerCamelCase_ :Dict=0.0 , **lowerCamelCase_ :List[str] , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[Any] = classifier_dropout
SCREAMING_SNAKE_CASE : Dict = is_decoder
SCREAMING_SNAKE_CASE : Union[str, Any] = act_dropout
| 18 |
"""simple docstring"""
def __A ( a_ : list , a_ : int , a_ : int = 0 , a_ : int = 0 )-> int:
'''simple docstring'''
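    # Double-ended linear search: test both ends of the current window, then
    # recurse on the narrowed range [left + 1, right - 1].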
SCREAMING_SNAKE_CASE : str = right or len(a_ ) - 1
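    # Note: `right or len(list_data) - 1` treats a falsy right (the default 0) as
    # "use the last index", so an explicit right=0 also scans the whole list.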
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(a_ , a_ , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = torch.device("cpu")
def __A ( )-> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE : Any = Image.open(requests.get(a_ , stream=a_ ).raw )
return im
def __A ( a_ : Any )-> Tuple:
'''simple docstring'''
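    # Reference slices (first five logits) from the original checkpoints, used
    # below to verify the converted model numerically.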
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703E00, 2.1107E00, -2.0811E00, 8.8685E-01, 2.4360E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636E-01, 2.3478E-01, -1.6963E00, -1.7381E00, -8.6337E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768E-01, -4.7429E-01, -1.0897E00, -1.0248E00, 3.5523E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330E-01, 2.4211E-01, -6.0185E-01, -8.2789E-01, -6.0446E-02] )
def __A ( a_ : Any , a_ : Union[str, Any] , a_ : Any )-> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = dct.pop(a_ )
SCREAMING_SNAKE_CASE : Any = val
def __A ( a_ : List[str] )-> List[str]:
'''simple docstring'''
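    # Map original SwiftFormer checkpoint keys onto the Hugging Face naming scheme
    # (.pwconv -> .point_wise_conv, .dwconv -> .depth_wise_conv, etc.).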
SCREAMING_SNAKE_CASE : Dict = []
for k in state_dict.keys():
SCREAMING_SNAKE_CASE : Any = k
if ".pwconv" in k:
SCREAMING_SNAKE_CASE : Dict = k_new.replace('''.pwconv''' , '''.point_wise_conv''' )
if ".dwconv" in k:
SCREAMING_SNAKE_CASE : Union[str, Any] = k_new.replace('''.dwconv''' , '''.depth_wise_conv''' )
if ".Proj." in k:
SCREAMING_SNAKE_CASE : Union[str, Any] = k_new.replace('''.Proj.''' , '''.proj.''' )
if "patch_embed" in k_new:
SCREAMING_SNAKE_CASE : Any = k_new.replace('''patch_embed''' , '''swiftformer.patch_embed.patch_embedding''' )
if "network" in k_new:
SCREAMING_SNAKE_CASE : Any = k_new.split('''.''' )
if ls[2].isdigit():
SCREAMING_SNAKE_CASE : int = '''swiftformer.encoder.network.''' + ls[1] + '''.blocks.''' + ls[2] + '''.''' + '''.'''.join(ls[3:] )
else:
SCREAMING_SNAKE_CASE : Any = k_new.replace('''network''' , '''swiftformer.encoder.network''' )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def __A ( a_ : str , a_ : Optional[int] , a_ : int )-> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
SCREAMING_SNAKE_CASE : Tuple = 10_00
SCREAMING_SNAKE_CASE : Tuple = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE : int = {int(a_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : str = idalabel
SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
SCREAMING_SNAKE_CASE : Optional[int] = [3, 3, 6, 4]
SCREAMING_SNAKE_CASE : Optional[int] = [48, 56, 1_12, 2_20]
elif swiftformer_name == "swiftformer_s":
SCREAMING_SNAKE_CASE : Dict = [3, 3, 9, 6]
SCREAMING_SNAKE_CASE : Tuple = [48, 64, 1_68, 2_24]
elif swiftformer_name == "swiftformer_l1":
SCREAMING_SNAKE_CASE : Dict = [4, 3, 10, 5]
SCREAMING_SNAKE_CASE : str = [48, 96, 1_92, 3_84]
elif swiftformer_name == "swiftformer_l3":
SCREAMING_SNAKE_CASE : str = [4, 4, 12, 6]
SCREAMING_SNAKE_CASE : Optional[int] = [64, 1_28, 3_20, 5_12]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('''https''' ):
SCREAMING_SNAKE_CASE : Any = torch.hub.load_state_dict_from_url(a_ , map_location='''cpu''' , check_hash=a_ )
else:
SCREAMING_SNAKE_CASE : List[Any] = torch.load(a_ , map_location='''cpu''' )
SCREAMING_SNAKE_CASE : Any = checkpoint
SCREAMING_SNAKE_CASE : int = create_rename_keys(a_ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(a_ , a_ , a_ )
# load HuggingFace model
SCREAMING_SNAKE_CASE : Dict = SwiftFormerForImageClassification(a_ ).eval()
hf_model.load_state_dict(a_ )
# prepare test inputs
SCREAMING_SNAKE_CASE : Optional[int] = prepare_img()
SCREAMING_SNAKE_CASE : Union[str, Any] = ViTImageProcessor.from_pretrained('''preprocessor_config''' )
SCREAMING_SNAKE_CASE : List[str] = processor(images=a_ , return_tensors='''pt''' )
# compare outputs from both models
SCREAMING_SNAKE_CASE : List[str] = get_expected_output(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = hf_model(inputs['''pixel_values'''] ).logits
assert hf_logits.shape == torch.Size([1, 10_00] )
assert torch.allclose(hf_logits[0, 0:5] , a_ , atol=1E-3 )
Path(a_ ).mkdir(exist_ok=a_ )
print(F"Saving model {swiftformer_name} to {pytorch_dump_folder_path}" )
hf_model.save_pretrained(a_ )
if __name__ == "__main__":
lowerCamelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
lowerCamelCase__ : Union[str, Any] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 18 |
"""simple docstring"""
def __A ( a_ : int )-> list[int]:
'''simple docstring'''
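    # Sieve of Eratosthenes: mark multiples of each prime p starting at p * p as
    # composite; the surviving indices >= 2 are the primes up to `num`.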
if num <= 0:
raise ValueError('''Input must be a positive integer''' )
SCREAMING_SNAKE_CASE : Optional[int] = [True] * (num + 1)
SCREAMING_SNAKE_CASE : Optional[Any] = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , a_ ):
SCREAMING_SNAKE_CASE : Any = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ : str = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 18 | 1 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def __A ( a_ : str , a_ : float | Decimal , a_ : float = 10**-10 )-> float:
'''simple docstring'''
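    # Newton-Raphson iteration: x_{n+1} = x_n - f(x_n) / f'(x_n), with sympy
    # supplying the symbolic derivative and Decimal keeping the update stable.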
SCREAMING_SNAKE_CASE : Tuple = a
while True:
SCREAMING_SNAKE_CASE : Tuple = Decimal(a_ ) - (
Decimal(eval(a_ ) ) / Decimal(eval(str(diff(a_ ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(a_ ) ) < precision: # noqa: S307
return float(a_ )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
    # Find value of e
print(f'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(f'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
| 18 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ : Optional[Any] = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
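# Optional backends (tokenizers, torch, TF) are appended below only when they are
# importable, so importing this module never hard-fails on a missing extra.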
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[Any] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Dict = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18 | 1 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
lowerCamelCase__ : Union[str, Any] = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
],
defaults=[None, None, None, False, False, False, False, False],
)
def __A ( a_ : Optional[Any] , a_ : int )-> Tuple:
'''simple docstring'''
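    # True when the relative error between source and target is below 1%.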
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def __A ( a_ : List[Any] )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = _TestCommandArgs(dataset=a_ , all_configs=a_ , save_infos=a_ )
SCREAMING_SNAKE_CASE : int = TestCommand(*a_ )
test_command.run()
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(a_ , '''README.md''' )
assert os.path.exists(a_ )
SCREAMING_SNAKE_CASE : Dict = DatasetInfosDict.from_directory(a_ )
SCREAMING_SNAKE_CASE : Any = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) , splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_35_15_63,
'''num_examples''': 1_00_00,
},
{
'''name''': '''validation''',
'''num_bytes''': 23_84_18,
'''num_examples''': 10_00,
},
] , download_size=3_94_06_80 , dataset_size=2_58_99_81 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = getattr(dataset_infos['''default'''] , a_ ), getattr(expected_dataset_infos['''default'''] , a_ )
if key == "num_bytes":
assert is_apercent_close(a_ , a_ )
elif key == "splits":
assert list(a_ ) == list(a_ )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
            assert result == expected
| 18 |
"""simple docstring"""
import os
import sys
lowerCamelCase__ : List[Any] = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowerCamelCase__ : str = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def __A ( *a_ : Any , **a_ : Union[str, Any] )-> Dict:
'''simple docstring'''
return AutoConfig.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __A ( *a_ : str , **a_ : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
return AutoTokenizer.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModel.__doc__ )
def __A ( *a_ : List[str] , **a_ : int )-> Dict:
'''simple docstring'''
return AutoModel.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __A ( *a_ : Any , **a_ : Tuple )-> Dict:
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __A ( *a_ : Dict , **a_ : Optional[Any] )-> Optional[int]:
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __A ( *a_ : Optional[int] , **a_ : str )-> Optional[int]:
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __A ( *a_ : List[str] , **a_ : int )-> List[Any]:
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*a_ , **a_ )
| 18 | 1 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
lowerCamelCase__ : str = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {"vocab_file": "spiece.model"}
lowerCamelCase__ : Tuple = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Union[str, Any]=False , lowerCamelCase_ :Optional[int]=True , lowerCamelCase_ :Any=False , lowerCamelCase_ :Optional[int]="<s>" , lowerCamelCase_ :int="</s>" , lowerCamelCase_ :Dict="<unk>" , lowerCamelCase_ :Tuple="<sep>" , lowerCamelCase_ :Union[str, Any]="<pad>" , lowerCamelCase_ :str="<cls>" , lowerCamelCase_ :Union[str, Any]="<mask>" , lowerCamelCase_ :List[Any]=["<eop>", "<eod>"] , lowerCamelCase_ :Optional[Dict[str, Any]] = None , **lowerCamelCase_ :int , ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token
SCREAMING_SNAKE_CASE : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCamelCase_ , remove_space=lowerCamelCase_ , keep_accents=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = 3
SCREAMING_SNAKE_CASE : List[str] = do_lower_case
SCREAMING_SNAKE_CASE : int = remove_space
SCREAMING_SNAKE_CASE : Tuple = keep_accents
SCREAMING_SNAKE_CASE : Tuple = vocab_file
SCREAMING_SNAKE_CASE : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCamelCase_ )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
'''See https://pypi.org/project/jieba/ for installation.''' )
SCREAMING_SNAKE_CASE : List[str] = jieba
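        # Map space and newline to the placeholder glyphs \u2582 and \u2583 so they
        # survive SentencePiece; `_decode` below reverses the substitution.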
SCREAMING_SNAKE_CASE : Any = str.maketrans(''' \n''' , '''\u2582\u2583''' )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def __lowerCAmelCase ( self :Dict ) -> Union[str, Any]:
'''simple docstring'''
return len(self.sp_model )
def __lowerCAmelCase ( self :str ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = {self.convert_ids_to_tokens(lowerCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self :Dict ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.__dict__.copy()
SCREAMING_SNAKE_CASE : Optional[Any] = None
return state
def __setstate__( self :Any , lowerCamelCase_ :Dict ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
SCREAMING_SNAKE_CASE : str = {}
SCREAMING_SNAKE_CASE : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Dict ) -> Optional[int]:
'''simple docstring'''
if self.remove_space:
SCREAMING_SNAKE_CASE : Optional[int] = ''' '''.join(inputs.strip().split() )
else:
SCREAMING_SNAKE_CASE : List[str] = inputs
SCREAMING_SNAKE_CASE : List[str] = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
SCREAMING_SNAKE_CASE : Union[str, Any] = unicodedata.normalize('''NFKD''' , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = ''''''.join([c for c in outputs if not unicodedata.combining(lowerCamelCase_ )] )
if self.do_lower_case:
SCREAMING_SNAKE_CASE : List[str] = outputs.lower()
return outputs
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :str ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.preprocess_text(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = []
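        # Re-split pieces such as "2000," so the digits and the trailing comma
        # become separate tokens (the same trick the XLNet tokenizer uses).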
for piece in pieces:
if len(lowerCamelCase_ ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
SCREAMING_SNAKE_CASE : int = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCamelCase_ , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
SCREAMING_SNAKE_CASE : List[str] = cur_pieces[1:]
else:
SCREAMING_SNAKE_CASE : int = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(lowerCamelCase_ )
else:
new_pieces.append(lowerCamelCase_ )
return new_pieces
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :Optional[Any] ) -> List[Any]:
'''simple docstring'''
return self.sp_model.PieceToId(lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :str ) -> Optional[Any]:
'''simple docstring'''
return self.sp_model.IdToPiece(lowerCamelCase_ )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = ''''''.join(lowerCamelCase_ ).replace(lowerCamelCase_ , ''' ''' ).strip()
return out_string
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :List[int] , lowerCamelCase_ :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Tuple = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :List[int] , lowerCamelCase_ :Optional[List[int]] = None , lowerCamelCase_ :bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is not None:
return ([0] * len(lowerCamelCase_ )) + [1] + ([0] * len(lowerCamelCase_ )) + [1, 1]
return ([0] * len(lowerCamelCase_ )) + [1, 1]
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :List[int] , lowerCamelCase_ :Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Union[str, Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :str , lowerCamelCase_ :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE : Dict = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase_ , '''wb''' ) as fi:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_ )
return (out_vocab_file,)
def __lowerCAmelCase ( self :Optional[int] , *lowerCamelCase_ :str , **lowerCamelCase_ :Dict ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = super()._decode(*lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = text.replace(''' ''' , '''''' ).replace('''\u2582''' , ''' ''' ).replace('''\u2583''' , '''\n''' )
return text
| 18 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Any = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """encodec"""
def __init__( self :List[str] , lowerCamelCase_ :Tuple=[1.5, 3.0, 6.0, 1_2.0, 2_4.0] , lowerCamelCase_ :str=2_40_00 , lowerCamelCase_ :Any=1 , lowerCamelCase_ :List[Any]=False , lowerCamelCase_ :Optional[int]=None , lowerCamelCase_ :Optional[Any]=None , lowerCamelCase_ :str=1_28 , lowerCamelCase_ :Any=32 , lowerCamelCase_ :int=1 , lowerCamelCase_ :Dict=[8, 5, 4, 2] , lowerCamelCase_ :List[Any]="weight_norm" , lowerCamelCase_ :Optional[int]=7 , lowerCamelCase_ :Tuple=7 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :int=2 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :Optional[int]="reflect" , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Dict=1.0 , lowerCamelCase_ :Any=10_24 , lowerCamelCase_ :str=None , lowerCamelCase_ :Union[str, Any]=True , **lowerCamelCase_ :Optional[int] , ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = target_bandwidths
SCREAMING_SNAKE_CASE : List[str] = sampling_rate
SCREAMING_SNAKE_CASE : Tuple = audio_channels
SCREAMING_SNAKE_CASE : Tuple = normalize
SCREAMING_SNAKE_CASE : str = chunk_length_s
SCREAMING_SNAKE_CASE : List[str] = overlap
SCREAMING_SNAKE_CASE : int = hidden_size
SCREAMING_SNAKE_CASE : Optional[int] = num_filters
SCREAMING_SNAKE_CASE : Tuple = num_residual_layers
SCREAMING_SNAKE_CASE : List[Any] = upsampling_ratios
SCREAMING_SNAKE_CASE : Optional[int] = norm_type
SCREAMING_SNAKE_CASE : Any = kernel_size
SCREAMING_SNAKE_CASE : Union[str, Any] = last_kernel_size
SCREAMING_SNAKE_CASE : Tuple = residual_kernel_size
SCREAMING_SNAKE_CASE : Any = dilation_growth_rate
SCREAMING_SNAKE_CASE : Optional[int] = use_causal_conv
SCREAMING_SNAKE_CASE : str = pad_mode
SCREAMING_SNAKE_CASE : List[Any] = compress
SCREAMING_SNAKE_CASE : Optional[Any] = num_lstm_layers
SCREAMING_SNAKE_CASE : Dict = trim_right_ratio
SCREAMING_SNAKE_CASE : List[Any] = codebook_size
SCREAMING_SNAKE_CASE : Union[str, Any] = codebook_dim if codebook_dim is not None else hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}" )
super().__init__(**lowerCamelCase_ )
@property
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __lowerCAmelCase ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
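        # One frame per encoder hop: sampling_rate / prod(upsampling_ratios).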
SCREAMING_SNAKE_CASE : Tuple = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
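        # Bandwidth is given in kbps and each quantizer emits 10 bits per frame
        # (log2 of the default 1024-entry codebook), hence the division below.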
return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 18 | 1 |
"""simple docstring"""
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = AutoencoderKL
UpperCamelCase = """sample"""
UpperCamelCase = 1E-2
@property
def __lowerCAmelCase ( self :List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = 4
SCREAMING_SNAKE_CASE : str = 3
SCREAMING_SNAKE_CASE : Union[str, Any] = (32, 32)
SCREAMING_SNAKE_CASE : Any = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase_ )
return {"sample": image}
@property
def __lowerCAmelCase ( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return (3, 32, 32)
@property
def __lowerCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
return (3, 32, 32)
def __lowerCAmelCase ( self :int ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def __lowerCAmelCase ( self :Dict ) -> str:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :str ) -> str:
'''simple docstring'''
pass
@unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' )
def __lowerCAmelCase ( self :int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = self.prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_class(**lowerCamelCase_ )
model.to(lowerCamelCase_ )
assert not model.is_gradient_checkpointing and model.training
SCREAMING_SNAKE_CASE : List[str] = model(**lowerCamelCase_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
SCREAMING_SNAKE_CASE : str = torch.randn_like(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
SCREAMING_SNAKE_CASE : List[Any] = self.model_class(**lowerCamelCase_ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(lowerCamelCase_ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
SCREAMING_SNAKE_CASE : List[Any] = model_a(**lowerCamelCase_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
SCREAMING_SNAKE_CASE : List[Any] = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
SCREAMING_SNAKE_CASE : Dict = dict(model.named_parameters() )
SCREAMING_SNAKE_CASE : int = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def __lowerCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def __lowerCAmelCase ( self :int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' )
SCREAMING_SNAKE_CASE : str = model.to(lowerCamelCase_ )
model.eval()
if torch_device == "mps":
SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(0 )
else:
SCREAMING_SNAKE_CASE : int = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
SCREAMING_SNAKE_CASE : str = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
SCREAMING_SNAKE_CASE : int = image.to(lowerCamelCase_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ , sample_posterior=lowerCamelCase_ , generator=lowerCamelCase_ ).sample
SCREAMING_SNAKE_CASE : Optional[Any] = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
SCREAMING_SNAKE_CASE : int = torch.tensor(
[
-4.0078E-01,
-3.8323E-04,
-1.2681E-01,
-1.1462E-01,
2.0095E-01,
1.0893E-01,
-8.8247E-02,
-3.0361E-01,
-9.8644E-03,
] )
elif torch_device == "cpu":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
self.assertTrue(torch_all_close(lowerCamelCase_ , lowerCamelCase_ , rtol=1E-2 ) )
@slow
class lowercase__( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :str , lowerCamelCase_ :Dict ) -> Union[str, Any]:
'''simple docstring'''
return f"gaussian_noise_s={seed}_shape={'_'.join([str(lowerCamelCase_ ) for s in shape] )}.npy"
def __lowerCAmelCase ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self :str , lowerCamelCase_ :Optional[Any]=0 , lowerCamelCase_ :Optional[Any]=(4, 3, 5_12, 5_12) , lowerCamelCase_ :Union[str, Any]=False ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = torch.floataa if fpaa else torch.floataa
SCREAMING_SNAKE_CASE : List[str] = torch.from_numpy(load_hf_numpy(self.get_file_format(lowerCamelCase_ , lowerCamelCase_ ) ) ).to(lowerCamelCase_ ).to(lowerCamelCase_ )
return image
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :str="CompVis/stable-diffusion-v1-4" , lowerCamelCase_ :int=False ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = '''fp16''' if fpaa else None
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.floataa if fpaa else torch.floataa
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoencoderKL.from_pretrained(
lowerCamelCase_ , subfolder='''vae''' , torch_dtype=lowerCamelCase_ , revision=lowerCamelCase_ , )
model.to(lowerCamelCase_ ).eval()
return model
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :Tuple=0 ) -> Optional[Any]:
'''simple docstring'''
if torch_device == "mps":
return torch.manual_seed(lowerCamelCase_ )
return torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :Dict , lowerCamelCase_ :str , lowerCamelCase_ :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE : List[str] = self.get_sd_image(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_generator(lowerCamelCase_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = model(lowerCamelCase_ , generator=lowerCamelCase_ , sample_posterior=lowerCamelCase_ ).sample
assert sample.shape == image.shape
SCREAMING_SNAKE_CASE : List[str] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(lowerCamelCase_ , lowerCamelCase_ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_sd_vae_model(fpaa=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = self.get_sd_image(lowerCamelCase_ , fpaa=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = self.get_generator(lowerCamelCase_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ , generator=lowerCamelCase_ , sample_posterior=lowerCamelCase_ ).sample
assert sample.shape == image.shape
SCREAMING_SNAKE_CASE : Tuple = sample[-1, -2:, :2, -2:].flatten().float().cpu()
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ , lowerCamelCase_ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE : Any = self.get_sd_image(lowerCamelCase_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ ).sample
assert sample.shape == image.shape
SCREAMING_SNAKE_CASE : Any = sample[-1, -2:, -2:, :2].flatten().float().cpu()
SCREAMING_SNAKE_CASE : Dict = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(lowerCamelCase_ , lowerCamelCase_ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE : Dict = self.get_sd_image(lowerCamelCase_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
SCREAMING_SNAKE_CASE : Union[str, Any] = sample[-1, -2:, :2, -2:].flatten().cpu()
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
def __lowerCAmelCase ( self :str , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[int] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.get_sd_vae_model(fpaa=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = self.get_sd_image(lowerCamelCase_ , shape=(3, 4, 64, 64) , fpaa=lowerCamelCase_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
SCREAMING_SNAKE_CASE : str = sample[-1, -2:, :2, -2:].flatten().float().cpu()
SCREAMING_SNAKE_CASE : Any = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ , lowerCamelCase_ , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Optional[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.get_sd_vae_model(fpaa=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_sd_image(lowerCamelCase_ , shape=(3, 4, 64, 64) , fpaa=lowerCamelCase_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model.decode(lowerCamelCase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
assert torch_all_close(lowerCamelCase_ , lowerCamelCase_ , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :Optional[int] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE : Dict = self.get_sd_image(lowerCamelCase_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model.decode(lowerCamelCase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
assert torch_all_close(lowerCamelCase_ , lowerCamelCase_ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.get_sd_vae_model()
SCREAMING_SNAKE_CASE : List[Any] = self.get_sd_image(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = self.get_generator(lowerCamelCase_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model.encode(lowerCamelCase_ ).latent_dist
SCREAMING_SNAKE_CASE : Dict = dist.sample(generator=lowerCamelCase_ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
SCREAMING_SNAKE_CASE : Optional[Any] = sample[0, -1, -3:, -3:].flatten().cpu()
SCREAMING_SNAKE_CASE : Any = torch.tensor(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = 3E-3 if torch_device != '''mps''' else 1E-2
assert torch_all_close(lowerCamelCase_ , lowerCamelCase_ , atol=lowerCamelCase_ )
| 18 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowercase__:
'''simple docstring'''
def __init__( self :Tuple , lowerCamelCase_ :Tuple , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :Union[str, Any]=4 , lowerCamelCase_ :List[str]=2 , lowerCamelCase_ :str=7 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Dict=True , lowerCamelCase_ :int=True , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Optional[Any]=99 , lowerCamelCase_ :Any=36 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :str=4 , lowerCamelCase_ :Tuple=37 , lowerCamelCase_ :Optional[int]="gelu" , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :Tuple=5_12 , lowerCamelCase_ :Optional[Any]=16 , lowerCamelCase_ :List[str]=2 , lowerCamelCase_ :Optional[int]=0.0_2 , lowerCamelCase_ :int=6 , lowerCamelCase_ :str=6 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :Union[str, Any]=4 , lowerCamelCase_ :List[Any]=None , lowerCamelCase_ :Tuple=10_00 , ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : List[str] = num_channels
SCREAMING_SNAKE_CASE : str = image_size
SCREAMING_SNAKE_CASE : Optional[int] = patch_size
SCREAMING_SNAKE_CASE : Tuple = text_seq_length
SCREAMING_SNAKE_CASE : Optional[int] = is_training
SCREAMING_SNAKE_CASE : Dict = use_input_mask
SCREAMING_SNAKE_CASE : Any = use_token_type_ids
SCREAMING_SNAKE_CASE : List[Any] = use_labels
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : int = type_vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Dict = coordinate_size
SCREAMING_SNAKE_CASE : List[Any] = shape_size
SCREAMING_SNAKE_CASE : Dict = num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = num_choices
SCREAMING_SNAKE_CASE : List[str] = scope
SCREAMING_SNAKE_CASE : Optional[int] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
SCREAMING_SNAKE_CASE : str = text_seq_length
SCREAMING_SNAKE_CASE : int = (image_size // patch_size) ** 2 + 1
SCREAMING_SNAKE_CASE : Optional[Any] = self.text_seq_length + self.image_seq_length
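# Worked example with the defaults above (image_size=4, patch_size=2, text_seq_length=7):
# image_seq_length = (4 // 2) ** 2 + 1 = 5, so the full seq_length is 7 + 5 = 12.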
def __lowerCAmelCase ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
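# (i.e. enforce x0 <= x1 and y0 <= y1 by swapping coordinates where needed, so every
# generated box is a valid (x0, y0, x1, y1) rectangle)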
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE : str = bbox[i, j, 3]
SCREAMING_SNAKE_CASE : List[str] = bbox[i, j, 1]
SCREAMING_SNAKE_CASE : Any = t
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE : Any = bbox[i, j, 2]
SCREAMING_SNAKE_CASE : Any = bbox[i, j, 0]
SCREAMING_SNAKE_CASE : Optional[Any] = t
SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.text_seq_length] )
SCREAMING_SNAKE_CASE : Any = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : List[str] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = LayoutLMvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# text + image
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ , pixel_values=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
SCREAMING_SNAKE_CASE : List[str] = model(pixel_values=lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __lowerCAmelCase ( self :str , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :List[str] , lowerCamelCase_ :Any , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE : Dict = LayoutLMvaForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.num_labels
SCREAMING_SNAKE_CASE : int = LayoutLMvaForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :Dict , lowerCamelCase_ :Dict , lowerCamelCase_ :Dict , lowerCamelCase_ :int , lowerCamelCase_ :List[str] , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = LayoutLMvaForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE : Dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
if is_torch_available()
else {}
)
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :str , lowerCamelCase_ :str , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
return True
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = LayoutLMvaModelTester(self )
SCREAMING_SNAKE_CASE : List[Any] = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :List[str] , lowerCamelCase_ :str=False ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = copy.deepcopy(lowerCamelCase_ )
if model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Tuple = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(v , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
elif model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
elif model_class in [
*get_values(lowerCamelCase_ ),
]:
SCREAMING_SNAKE_CASE : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
elif model_class in [
*get_values(lowerCamelCase_ ),
]:
SCREAMING_SNAKE_CASE : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowerCamelCase_ , )
return inputs_dict
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE : str = type
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Union[str, Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[int] = LayoutLMvaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __A ( )-> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self :str ) -> int:
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase_ ) if is_vision_available() else None
@slow
def __lowerCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.default_image_processor
SCREAMING_SNAKE_CASE : List[Any] = prepare_img()
SCREAMING_SNAKE_CASE : Tuple = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).pixel_values.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[1, 2]] )
SCREAMING_SNAKE_CASE : Dict = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
SCREAMING_SNAKE_CASE : Tuple = model(
input_ids=input_ids.to(lowerCamelCase_ ) , bbox=bbox.to(lowerCamelCase_ ) , pixel_values=pixel_values.to(lowerCamelCase_ ) , )
# verify the last hidden states
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 1_99, 7_68) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase_ , atol=1E-4 ) )
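# A small hypothetical helper (not part of the test suite) showing how the expected
# 3x3 slice asserted above is typically regenerated offline before being pasted in:
def _print_expected_slice(model, input_ids, bbox, pixel_values):
    with torch.no_grad():
        out = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values)
    # copy the printed values into the torch.tensor(...) literal of the test
    print(out.last_hidden_state[0, :3, :3])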
| 18 | 1 |
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowercase__( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = '''ylacombe/bark-small'''
SCREAMING_SNAKE_CASE : Any = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Any = '''en_speaker_1'''
SCREAMING_SNAKE_CASE : Tuple = '''This is a test string'''
SCREAMING_SNAKE_CASE : Optional[int] = '''speaker_embeddings_path.json'''
SCREAMING_SNAKE_CASE : int = '''speaker_embeddings'''
def __lowerCAmelCase ( self :Optional[int] , **lowerCamelCase_ :Any ) -> Union[str, Any]:
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCamelCase_ )
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Optional[Any] = BarkProcessor(tokenizer=lowerCamelCase_ )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def __lowerCAmelCase ( self :str ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def __lowerCAmelCase ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
SCREAMING_SNAKE_CASE : Tuple = 35
SCREAMING_SNAKE_CASE : Tuple = 2
SCREAMING_SNAKE_CASE : Optional[Any] = 8
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''semantic_prompt''': np.ones(lowerCamelCase_ ),
'''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
'''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
SCREAMING_SNAKE_CASE : int = processor(text=self.input_string , voice_preset=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , '''file.npz''' )
np.savez(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = processor(text=self.input_string , voice_preset=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCamelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
SCREAMING_SNAKE_CASE : str = processor(text=self.input_string , voice_preset=self.voice_preset )
def __lowerCAmelCase ( self :Optional[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Any = BarkProcessor(tokenizer=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = processor(text=self.input_string )
SCREAMING_SNAKE_CASE : Tuple = tokenizer(
self.input_string , padding='''max_length''' , max_length=2_56 , add_special_tokens=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
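# For reference, a minimal sketch of a Bark voice preset as exercised above; the helper
# name and defaults are assumptions, but the three keys and their array shapes mirror
# the dict built in the test:
def make_dummy_voice_preset(seq_len=35, nb_codebooks_coarse=2, nb_codebooks_total=8):
    return {
        "semantic_prompt": np.ones(seq_len),
        "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
        "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
    }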
| 18 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
lowerCamelCase__ : Any = logging.getLogger(__name__)
@dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = field(
default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
UpperCamelCase = field(
default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , )
UpperCamelCase = field(
default=10_24 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of prediction examples to this """
"""value if set."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """A csv or a json file containing the training data."""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """A csv or a json file containing the validation data."""} )
UpperCamelCase = field(default=_UpperCAmelCase , metadata={"""help""": """A csv or a json file containing the test data."""} )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
SCREAMING_SNAKE_CASE : Optional[int] = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
UpperCamelCase = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def __A ( )-> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
SCREAMING_SNAKE_CASE : Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(a_ )
datasets.utils.logging.set_verbosity(a_ )
transformers.utils.logging.set_verbosity(a_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE : Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
SCREAMING_SNAKE_CASE : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
SCREAMING_SNAKE_CASE : Any = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
SCREAMING_SNAKE_CASE : List[Any] = data_args.train_file.split('''.''' )[-1]
SCREAMING_SNAKE_CASE : Optional[int] = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
SCREAMING_SNAKE_CASE : str = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(F"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
SCREAMING_SNAKE_CASE : int = load_dataset('''csv''' , data_files=a_ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
SCREAMING_SNAKE_CASE : Tuple = load_dataset('''json''' , data_files=a_ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
SCREAMING_SNAKE_CASE : str = raw_datasets['''train'''].features['''label'''].names
SCREAMING_SNAKE_CASE : Union[str, Any] = len(a_ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=a_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
SCREAMING_SNAKE_CASE : Dict = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=a_ , )
SCREAMING_SNAKE_CASE : List[Any] = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=a_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
SCREAMING_SNAKE_CASE : Tuple = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
SCREAMING_SNAKE_CASE : Optional[Any] = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
SCREAMING_SNAKE_CASE : Tuple = {'''Refused''': 0, '''Entailed''': 1}
SCREAMING_SNAKE_CASE : List[Any] = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
SCREAMING_SNAKE_CASE : Optional[int] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(a_ : str ):
# Tokenize the texts
def _convert_table_text_to_pandas(a_ : List[Any] ):
SCREAMING_SNAKE_CASE : List[Any] = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
SCREAMING_SNAKE_CASE : Dict = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
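# Worked example of the flattened table format parsed above: the string
# "col1#col2\na#b\nc#d" becomes a DataFrame with columns ["col1", "col2"] and
# rows ("a", "b") and ("c", "d").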
SCREAMING_SNAKE_CASE : List[Any] = examples['''statement''']
SCREAMING_SNAKE_CASE : Optional[int] = list(map(_convert_table_text_to_pandas , examples['''table_text'''] ) )
SCREAMING_SNAKE_CASE : Any = tokenizer(a_ , a_ , padding=a_ , max_length=a_ , truncation=a_ )
SCREAMING_SNAKE_CASE : List[Any] = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
SCREAMING_SNAKE_CASE : Optional[Any] = raw_datasets.map(
a_ , batched=a_ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on dataset''' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
SCREAMING_SNAKE_CASE : List[str] = raw_datasets['''train''']
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE : Any = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
SCREAMING_SNAKE_CASE : List[str] = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
SCREAMING_SNAKE_CASE : Tuple = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
SCREAMING_SNAKE_CASE : Optional[int] = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(a_ ) ) , 3 ):
logger.info(F"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(a_ : EvalPrediction ):
SCREAMING_SNAKE_CASE : str = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
SCREAMING_SNAKE_CASE : Tuple = np.argmax(a_ , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
SCREAMING_SNAKE_CASE : Tuple = default_data_collator
elif training_args.fpaa:
SCREAMING_SNAKE_CASE : Union[str, Any] = DataCollatorWithPadding(a_ , pad_to_multiple_of=8 )
else:
SCREAMING_SNAKE_CASE : List[Any] = None
# Initialize our Trainer
SCREAMING_SNAKE_CASE : Optional[Any] = Trainer(
model=a_ , args=a_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=a_ , tokenizer=a_ , data_collator=a_ , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE : List[str] = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE : str = last_checkpoint
SCREAMING_SNAKE_CASE : str = trainer.train(resume_from_checkpoint=a_ )
SCREAMING_SNAKE_CASE : Optional[int] = train_result.metrics
SCREAMING_SNAKE_CASE : int = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(a_ )
)
SCREAMING_SNAKE_CASE : Optional[int] = min(a_ , len(a_ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , a_ )
trainer.save_metrics('''train''' , a_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
SCREAMING_SNAKE_CASE : Tuple = trainer.evaluate(eval_dataset=a_ )
SCREAMING_SNAKE_CASE : str = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = min(a_ , len(a_ ) )
trainer.log_metrics('''eval''' , a_ )
trainer.save_metrics('''eval''' , a_ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` column because it contains -1 and Trainer won't like that.
SCREAMING_SNAKE_CASE : Optional[Any] = predict_dataset.remove_columns('''label''' )
SCREAMING_SNAKE_CASE : Optional[Any] = trainer.predict(a_ , metric_key_prefix='''predict''' ).predictions
SCREAMING_SNAKE_CASE : Union[str, Any] = np.argmax(a_ , axis=1 )
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(a_ , '''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(a_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = label_list[item]
writer.write(F"{index}\t{item}\n" )
SCREAMING_SNAKE_CASE : Optional[int] = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**a_ )
else:
trainer.create_model_card(**a_ )
def __A ( a_ : List[str] )-> int:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
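# A hypothetical invocation (the flags follow directly from the dataclasses and
# TrainingArguments above; the script name is an assumption):
# python run_tabfact.py --model_name_or_path microsoft/tapex-base \
#     --dataset_name tab_fact --do_train --do_eval --output_dir ./tapex-tabfact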
| 18 | 1 |
"""simple docstring"""
lowerCamelCase__ : Dict = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
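# The guard pattern repeated throughout this file, reduced to a self-contained sketch
# with generic stand-in names (diffusers' real OptionalDependencyNotAvailable and the
# dummy_* fallback modules live in its utils package):
class _OptionalDependencyMissing(Exception):
    pass

def _check_backend(available: bool) -> None:
    if not available:
        raise _OptionalDependencyMissing()

try:
    _check_backend(False)  # pretend the optional backend is not installed
except _OptionalDependencyMissing:
    PIPELINE = None  # stand-in for `from .utils.dummy_* import *`
else:
    PIPELINE = object()  # stand-in for the real import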
| 18 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase__:
'''simple docstring'''
def __init__( self :str , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Any=13 , lowerCamelCase_ :Any=32 , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :Union[str, Any]=16 , lowerCamelCase_ :int=[1, 2, 1] , lowerCamelCase_ :str=[2, 2, 4] , lowerCamelCase_ :str=2 , lowerCamelCase_ :Tuple=2.0 , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :str=0.0 , lowerCamelCase_ :Optional[int]=0.0 , lowerCamelCase_ :Dict=0.1 , lowerCamelCase_ :Union[str, Any]="gelu" , lowerCamelCase_ :str=False , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :int=0.0_2 , lowerCamelCase_ :List[Any]=1E-5 , lowerCamelCase_ :int=True , lowerCamelCase_ :str=None , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Union[str, Any]=10 , lowerCamelCase_ :List[Any]=8 , ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : Any = embed_dim
SCREAMING_SNAKE_CASE : int = depths
SCREAMING_SNAKE_CASE : List[str] = num_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = window_size
SCREAMING_SNAKE_CASE : Optional[Any] = mlp_ratio
SCREAMING_SNAKE_CASE : List[Any] = qkv_bias
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = drop_path_rate
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = use_absolute_embeddings
SCREAMING_SNAKE_CASE : Any = patch_norm
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Any = is_training
SCREAMING_SNAKE_CASE : List[Any] = scope
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : Optional[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_stride
def __lowerCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self :int ) -> int:
'''simple docstring'''
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = SwinvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
SCREAMING_SNAKE_CASE : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
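# Worked example with this tester's defaults (image_size=32, patch_size=2, embed_dim=16,
# len(depths)=3): expected_seq_len = (32 // 2) ** 2 // 4 ** 2 = 16 and
# expected_dim = 16 * 2 ** 2 = 64.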
def __lowerCAmelCase ( self :str , lowerCamelCase_ :str , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = SwinvaForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE : Tuple = 1
SCREAMING_SNAKE_CASE : List[Any] = SwinvaForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :int ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = SwinvaForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
UpperCamelCase = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __lowerCAmelCase ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = SwinvaModelTester(self )
SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=lowerCamelCase_ , embed_dim=37 )
def __lowerCAmelCase ( self :Dict ) -> List[str]:
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def __lowerCAmelCase ( self :List[Any] ) -> Any:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[Any] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , nn.Linear ) )
def __lowerCAmelCase ( self :int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Optional[int] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.attentions
SCREAMING_SNAKE_CASE : Tuple = len(self.model_tester.depths )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Optional[int] = config.window_size**2
SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Dict = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
SCREAMING_SNAKE_CASE : Dict = len(lowerCamelCase_ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : int = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
if hasattr(self.model_tester , '''num_hidden_states_types''' ):
SCREAMING_SNAKE_CASE : Any = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
SCREAMING_SNAKE_CASE : Optional[Any] = 2
self.assertEqual(out_len + added_hidden_states , len(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
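# With the defaults above (window_size=2, num_heads=[2, 2, 4]), window_size_squared is 4,
# so each attention map checked here ends in shape [2, 4, 4].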
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :int , lowerCamelCase_ :Any , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE : str = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
# Swinv2 has a different seq_length
SCREAMING_SNAKE_CASE : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
SCREAMING_SNAKE_CASE : Any = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = reshaped_hidden_states[0].shape
SCREAMING_SNAKE_CASE : Optional[int] = (
reshaped_hidden_states[0].view(lowerCamelCase_ , lowerCamelCase_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : List[str] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[Any] = 3
SCREAMING_SNAKE_CASE : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
SCREAMING_SNAKE_CASE : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
SCREAMING_SNAKE_CASE : Optional[int] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , (padded_height, padded_width) )
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Tuple = SwinvaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self :Dict ) -> List[Any]:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE : List[str] = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**lowerCamelCase_ )
# verify the logits
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) )
| 18 | 1 |
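The attention-shape assertion in the test above follows directly from windowed self-attention: each window of window_size x window_size patches attends only within itself, so every attention map is square in the per-window token count. A minimal arithmetic sketch (illustrative numbers only, not tied to any checkpoint):

# why the test expects trailing dims [num_heads, window_size**2, window_size**2]
window_size = 8                        # e.g. a "window8" Swinv2 variant
tokens_per_window = window_size ** 2   # 64 query/key positions per window
expected_tail = ["num_heads", tokens_per_window, tokens_per_window]
print(expected_tail)                   # ['num_heads', 64, 64]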
"""simple docstring"""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """M-CLIP"""
def __init__( self :Optional[Any] , lowerCamelCase_ :Union[str, Any]=10_24 , lowerCamelCase_ :Optional[int]=7_68 , **lowerCamelCase_ :List[str] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = transformerDimSize
SCREAMING_SNAKE_CASE : int = imageDimSize
super().__init__(**lowerCamelCase_ )
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = MCLIPConfig
def __init__( self :Optional[int] , lowerCamelCase_ :List[Any] , *lowerCamelCase_ :str , **lowerCamelCase_ :str ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = XLMRobertaModel(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Any , lowerCamelCase_ :Dict ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.transformer(input_ids=lowerCamelCase_ , attention_mask=lowerCamelCase_ )[0]
SCREAMING_SNAKE_CASE : List[str] = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(lowerCamelCase_ ), embs
| 18 |
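The forward pass above reduces per-token embeddings to a single sentence vector by attention-masked mean pooling. A minimal numeric sketch of just that pooling line (hand-built tensors, no model involved):

import torch

embs = torch.tensor([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]])  # (batch=1, seq=3, dim=2)
attention_mask = torch.tensor([[1, 1, 0]])                    # last position is padding
# zero out padded tokens, sum over the sequence, divide by the real token count
pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
print(pooled)  # tensor([[2., 3.]]) - the mean of the two unmasked tokens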
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """blenderbot-small"""
UpperCamelCase = ["""past_key_values"""]
UpperCamelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self :Any , lowerCamelCase_ :Dict=5_02_65 , lowerCamelCase_ :str=5_12 , lowerCamelCase_ :Tuple=8 , lowerCamelCase_ :int=20_48 , lowerCamelCase_ :str=16 , lowerCamelCase_ :Optional[int]=8 , lowerCamelCase_ :str=20_48 , lowerCamelCase_ :Optional[Any]=16 , lowerCamelCase_ :Union[str, Any]=0.0 , lowerCamelCase_ :List[str]=0.0 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :int="gelu" , lowerCamelCase_ :Tuple=5_12 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :int=0.0 , lowerCamelCase_ :Tuple=0.0 , lowerCamelCase_ :Optional[int]=0.0_2 , lowerCamelCase_ :Union[str, Any]=1 , lowerCamelCase_ :Dict=False , lowerCamelCase_ :Optional[int]=0 , lowerCamelCase_ :List[Any]=1 , lowerCamelCase_ :Any=2 , lowerCamelCase_ :Optional[Any]=2 , **lowerCamelCase_ :Dict , ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[Any] = d_model
SCREAMING_SNAKE_CASE : Dict = encoder_ffn_dim
SCREAMING_SNAKE_CASE : Tuple = encoder_layers
SCREAMING_SNAKE_CASE : Dict = encoder_attention_heads
SCREAMING_SNAKE_CASE : Any = decoder_ffn_dim
SCREAMING_SNAKE_CASE : str = decoder_layers
SCREAMING_SNAKE_CASE : str = decoder_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = dropout
SCREAMING_SNAKE_CASE : Optional[Any] = attention_dropout
SCREAMING_SNAKE_CASE : Any = activation_dropout
SCREAMING_SNAKE_CASE : List[str] = activation_function
SCREAMING_SNAKE_CASE : Optional[int] = init_std
SCREAMING_SNAKE_CASE : List[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_layerdrop
SCREAMING_SNAKE_CASE : List[Any] = use_cache
SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_layers
SCREAMING_SNAKE_CASE : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , forced_eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self :Any ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch'''}
SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase_ , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self.num_layers
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : int = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE : List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE : Any = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def __lowerCAmelCase ( self :List[str] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Any = super().outputs
else:
SCREAMING_SNAKE_CASE : Tuple = super(lowerCamelCase_ , self ).outputs
if self.use_past:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_layers
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE : str = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def __lowerCAmelCase ( self :int , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Generate decoder inputs
SCREAMING_SNAKE_CASE : Optional[int] = seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE : str = dict(**lowerCamelCase_ , **lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = common_inputs['''input_ids'''].shape
SCREAMING_SNAKE_CASE : str = common_inputs['''decoder_input_ids'''].shape[1]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.num_attention_heads
SCREAMING_SNAKE_CASE : str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Optional[Any] = decoder_seq_length + 3
SCREAMING_SNAKE_CASE : int = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
SCREAMING_SNAKE_CASE : List[Any] = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(lowerCamelCase_ , lowerCamelCase_ )] , dim=1 )
SCREAMING_SNAKE_CASE : Optional[int] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.num_layers
SCREAMING_SNAKE_CASE : int = min(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = max(lowerCamelCase_ , lowerCamelCase_ ) - min_num_layers
SCREAMING_SNAKE_CASE : Tuple = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(lowerCamelCase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE : int = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(lowerCamelCase_ , lowerCamelCase_ ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) )
return common_inputs
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE : List[str] = seqlen + 2
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = self.num_layers
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self.num_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Tuple = common_inputs['''attention_mask'''].dtype
SCREAMING_SNAKE_CASE : Any = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(lowerCamelCase_ , lowerCamelCase_ , dtype=lowerCamelCase_ )] , dim=1 )
SCREAMING_SNAKE_CASE : Optional[int] = [
(torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) for _ in range(lowerCamelCase_ )
]
return common_inputs
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : int = tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Tuple = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE : Any = dict(tokenizer(lowerCamelCase_ , return_tensors=lowerCamelCase_ ) )
return common_inputs
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
elif self.task == "causal-lm":
SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
return common_inputs
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :int , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Dict ) -> List[Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Optional[Any] = super()._flatten_past_key_values_(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Tuple = super(lowerCamelCase_ , self )._flatten_past_key_values_(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
| 18 | 1 |
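Every dummy past_key_values tensor built above shares one shape: (batch, num_attention_heads, past_length, hidden_size // num_attention_heads). A small sketch of that arithmetic for the causal-lm branch, which pads the past length by 2 (illustrative sizes, not a real checkpoint):

import torch

batch, seq_len = 2, 7
d_model, num_heads = 512, 8
past_length = seq_len + 2            # mirrors the "seqlen + 2" used in the causal-lm branch
head_dim = d_model // num_heads      # 64
past_shape = (batch, num_heads, past_length, head_dim)
key, value = torch.zeros(past_shape), torch.zeros(past_shape)
print(key.shape)                     # torch.Size([2, 8, 9, 64])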
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ : Tuple = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : str = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Dict = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18 |
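From the user side, the _LazyModule wiring above means nothing heavy is imported until an attribute is actually touched. A quick usage sketch (assumes a transformers installation; ConvBertConfig is a public name exported by the import table above):

from transformers import ConvBertConfig  # attribute access triggers the lazy import
config = ConvBertConfig()                # now the real configuration module is loaded
print(config.model_type)                 # convbert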
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCamelCase__ : Dict = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """layoutlmv3"""
def __init__( self :str , lowerCamelCase_ :Optional[Any]=5_02_65 , lowerCamelCase_ :Dict=7_68 , lowerCamelCase_ :Union[str, Any]=12 , lowerCamelCase_ :Optional[Any]=12 , lowerCamelCase_ :Union[str, Any]=30_72 , lowerCamelCase_ :Any="gelu" , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Any=5_12 , lowerCamelCase_ :int=2 , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :Optional[int]=1E-5 , lowerCamelCase_ :Dict=1 , lowerCamelCase_ :int=0 , lowerCamelCase_ :Tuple=2 , lowerCamelCase_ :List[str]=10_24 , lowerCamelCase_ :Tuple=1_28 , lowerCamelCase_ :Any=1_28 , lowerCamelCase_ :Optional[Any]=True , lowerCamelCase_ :str=32 , lowerCamelCase_ :int=1_28 , lowerCamelCase_ :int=64 , lowerCamelCase_ :List[Any]=2_56 , lowerCamelCase_ :Any=True , lowerCamelCase_ :str=True , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :List[str]=2_24 , lowerCamelCase_ :Dict=3 , lowerCamelCase_ :Union[str, Any]=16 , lowerCamelCase_ :Any=None , **lowerCamelCase_ :Optional[Any] , ) -> int:
'''simple docstring'''
super().__init__(
vocab_size=lowerCamelCase_ , hidden_size=lowerCamelCase_ , num_hidden_layers=lowerCamelCase_ , num_attention_heads=lowerCamelCase_ , intermediate_size=lowerCamelCase_ , hidden_act=lowerCamelCase_ , hidden_dropout_prob=lowerCamelCase_ , attention_probs_dropout_prob=lowerCamelCase_ , max_position_embeddings=lowerCamelCase_ , type_vocab_size=lowerCamelCase_ , initializer_range=lowerCamelCase_ , layer_norm_eps=lowerCamelCase_ , pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Optional[Any] = max_ad_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = coordinate_size
SCREAMING_SNAKE_CASE : Tuple = shape_size
SCREAMING_SNAKE_CASE : Optional[int] = has_relative_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_pos_bins
SCREAMING_SNAKE_CASE : int = max_rel_pos
SCREAMING_SNAKE_CASE : Any = has_spatial_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_ad_pos_bins
SCREAMING_SNAKE_CASE : Dict = max_rel_ad_pos
SCREAMING_SNAKE_CASE : Optional[int] = text_embed
SCREAMING_SNAKE_CASE : Any = visual_embed
SCREAMING_SNAKE_CASE : Any = input_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : List[str] = patch_size
SCREAMING_SNAKE_CASE : str = classifier_dropout
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = version.parse("""1.12""" )
@property
def __lowerCAmelCase ( self :List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def __lowerCAmelCase ( self :Optional[int] ) -> float:
'''simple docstring'''
return 1E-5
@property
def __lowerCAmelCase ( self :Tuple ) -> int:
'''simple docstring'''
return 12
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :"ProcessorMixin" , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional["TensorType"] = None , lowerCamelCase_ :int = 3 , lowerCamelCase_ :int = 40 , lowerCamelCase_ :int = 40 , ) -> Mapping[str, Any]:
'''simple docstring'''
setattr(processor.image_processor , '''apply_ocr''' , lowerCamelCase_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Dict = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Union[str, Any] = processor.tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Union[str, Any] = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
SCREAMING_SNAKE_CASE : int = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_images(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = dict(
processor(
lowerCamelCase_ , text=lowerCamelCase_ , boxes=lowerCamelCase_ , return_tensors=lowerCamelCase_ , ) )
return inputs
| 18 | 1 |
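The dummy-input builder above leans on compute_effective_axis_dimension to turn dynamic ONNX axes (-1) into small concrete sizes. A re-implementation sketch of the behaviour relied on here (an assumption about the helper for illustration, not the library source verbatim):

def effective_axis(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int:
    # dynamic axes are marked -1; substitute a small fixed size so ONNX tracing
    # sees concrete shapes, then leave room for special tokens
    if dimension <= 0:
        dimension = fixed_dimension
    return dimension - num_token_to_add

print(effective_axis(-1, fixed_dimension=8, num_token_to_add=2))  # 6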
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ : List[str] = {
"configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
"feature_extraction_whisper": ["WhisperFeatureExtractor"],
"processing_whisper": ["WhisperProcessor"],
"tokenization_whisper": ["WhisperTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Dict = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : List[str] = [
"WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"WhisperForConditionalGeneration",
"WhisperModel",
"WhisperPreTrainedModel",
"WhisperForAudioClassification",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[int] = [
"TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWhisperForConditionalGeneration",
"TFWhisperModel",
"TFWhisperPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : int = [
"FlaxWhisperForConditionalGeneration",
"FlaxWhisperModel",
"FlaxWhisperPreTrainedModel",
"FlaxWhisperForAudioClassification",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18 |
"""simple docstring"""
import math
def __A ( a_ : list , a_ : int )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = len(a_ )
SCREAMING_SNAKE_CASE : Optional[Any] = int(math.floor(math.sqrt(a_ ) ) )
SCREAMING_SNAKE_CASE : List[str] = 0
while arr[min(a_ , a_ ) - 1] < x:
SCREAMING_SNAKE_CASE : Optional[Any] = step
step += int(math.floor(math.sqrt(a_ ) ) )
if prev >= n:
return -1
while arr[prev] < x:
SCREAMING_SNAKE_CASE : Any = prev + 1
if prev == min(a_ , a_ ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
lowerCamelCase__ : Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
lowerCamelCase__ : List[str] = [int(item) for item in user_input.split(",")]
lowerCamelCase__ : Dict = int(input("Enter the number to be searched:\n"))
lowerCamelCase__ : Tuple = jump_search(arr, x)
if res == -1:
print("Number not found!")
else:
print(f'''Number {x} is at index {res}''')
| 18 | 1 |
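Because the row above machine-mangles its local-variable assignments, here is a clean, runnable restatement of the same jump search: jump ahead in blocks of about sqrt(n) through a sorted list, then scan linearly inside the final block, for O(sqrt(n)) comparisons overall.

import math

def jump_search(arr: list[int], x: int) -> int:
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))     # block size ~ sqrt(n)
    prev = 0
    while arr[min(step, n) - 1] < x:         # jump block by block past smaller values
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:                     # linear scan inside the final block
        prev += 1
        if prev == min(step, n):
            return -1
    return prev if arr[prev] == x else -1

print(jump_search([0, 1, 3, 5, 8, 13, 21, 34], 13))  # 5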
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowercase__:
'''simple docstring'''
def __init__( self :Tuple , lowerCamelCase_ :Tuple , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :Union[str, Any]=4 , lowerCamelCase_ :List[str]=2 , lowerCamelCase_ :str=7 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Dict=True , lowerCamelCase_ :int=True , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Optional[Any]=99 , lowerCamelCase_ :Any=36 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :str=4 , lowerCamelCase_ :Tuple=37 , lowerCamelCase_ :Optional[int]="gelu" , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :Tuple=5_12 , lowerCamelCase_ :Optional[Any]=16 , lowerCamelCase_ :List[str]=2 , lowerCamelCase_ :Optional[int]=0.0_2 , lowerCamelCase_ :int=6 , lowerCamelCase_ :str=6 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :Union[str, Any]=4 , lowerCamelCase_ :List[Any]=None , lowerCamelCase_ :Tuple=10_00 , ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : List[str] = num_channels
SCREAMING_SNAKE_CASE : str = image_size
SCREAMING_SNAKE_CASE : Optional[int] = patch_size
SCREAMING_SNAKE_CASE : Tuple = text_seq_length
SCREAMING_SNAKE_CASE : Optional[int] = is_training
SCREAMING_SNAKE_CASE : Dict = use_input_mask
SCREAMING_SNAKE_CASE : Any = use_token_type_ids
SCREAMING_SNAKE_CASE : List[Any] = use_labels
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : int = type_vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Dict = coordinate_size
SCREAMING_SNAKE_CASE : List[Any] = shape_size
SCREAMING_SNAKE_CASE : Dict = num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = num_choices
SCREAMING_SNAKE_CASE : List[str] = scope
SCREAMING_SNAKE_CASE : Optional[int] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
SCREAMING_SNAKE_CASE : str = text_seq_length
SCREAMING_SNAKE_CASE : int = (image_size // patch_size) ** 2 + 1
SCREAMING_SNAKE_CASE : Optional[Any] = self.text_seq_length + self.image_seq_length
def __lowerCAmelCase ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE : str = bbox[i, j, 3]
SCREAMING_SNAKE_CASE : List[str] = bbox[i, j, 1]
SCREAMING_SNAKE_CASE : Any = t
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE : Any = bbox[i, j, 2]
SCREAMING_SNAKE_CASE : Any = bbox[i, j, 0]
SCREAMING_SNAKE_CASE : Optional[Any] = t
SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.text_seq_length] )
SCREAMING_SNAKE_CASE : Any = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : List[str] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = LayoutLMvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# text + image
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ , pixel_values=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
SCREAMING_SNAKE_CASE : List[str] = model(pixel_values=lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __lowerCAmelCase ( self :str , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :List[str] , lowerCamelCase_ :Any , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE : Dict = LayoutLMvaForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.num_labels
SCREAMING_SNAKE_CASE : int = LayoutLMvaForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :Dict , lowerCamelCase_ :Dict , lowerCamelCase_ :Dict , lowerCamelCase_ :int , lowerCamelCase_ :List[str] , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = LayoutLMvaForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
),
) : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE : Dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
if is_torch_available()
else {}
)
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :str , lowerCamelCase_ :str , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
return True
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = LayoutLMvaModelTester(self )
SCREAMING_SNAKE_CASE : List[Any] = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :List[str] , lowerCamelCase_ :str=False ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = copy.deepcopy(lowerCamelCase_ )
if model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Tuple = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(lowerCamelCase_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
elif model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
elif model_class in [
*get_values(lowerCamelCase_ ),
]:
SCREAMING_SNAKE_CASE : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
elif model_class in [
*get_values(lowerCamelCase_ ),
]:
SCREAMING_SNAKE_CASE : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowerCamelCase_ , )
return inputs_dict
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE : str = type
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Union[str, Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[int] = LayoutLMvaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __A ( )-> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self :str ) -> int:
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase_ ) if is_vision_available() else None
@slow
def __lowerCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.default_image_processor
SCREAMING_SNAKE_CASE : List[Any] = prepare_img()
SCREAMING_SNAKE_CASE : Tuple = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).pixel_values.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[1, 2]] )
SCREAMING_SNAKE_CASE : Dict = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
SCREAMING_SNAKE_CASE : Tuple = model(
input_ids=input_ids.to(lowerCamelCase_ ) , bbox=bbox.to(lowerCamelCase_ ) , pixel_values=pixel_values.to(lowerCamelCase_ ) , )
# verify the logits
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 1_99, 7_68) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase_ , atol=1E-4 ) )
| 18 |
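The sequence-length bookkeeping in the tester above is worth spelling out: the model sees the text tokens plus one token per image patch plus a CLS token. With the tester defaults (image_size=4, patch_size=2, text_seq_length=7):

image_size, patch_size, text_seq_length = 4, 2, 7
image_seq_length = (image_size // patch_size) ** 2 + 1  # 4 patches + 1 CLS = 5
seq_length = text_seq_length + image_seq_length         # 7 + 5 = 12
print(image_seq_length, seq_length)                     # 5 12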
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
lowerCamelCase__ : List[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
lowerCamelCase__ : str = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights. Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
lowerCamelCase__ : int = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__( datasets.Metric ):
'''simple docstring'''
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :int=None , lowerCamelCase_ :str=1 , lowerCamelCase_ :Union[str, Any]="binary" , lowerCamelCase_ :Dict=None ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = fa_score(
lowerCamelCase_ , lowerCamelCase_ , labels=lowerCamelCase_ , pos_label=lowerCamelCase_ , average=lowerCamelCase_ , sample_weight=lowerCamelCase_ )
return {"f1": float(lowerCamelCase_ ) if score.size == 1 else score}
| 18 | 1 |
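The multiclass numbers in the docstring above can be reproduced with sklearn directly, which also shows where macro (mean of per-class scores) and micro (global TP/FP/FN counting) diverge:

from sklearn.metrics import f1_score

preds = [0, 2, 1, 0, 0, 1]
refs = [0, 1, 2, 0, 1, 2]
print(f1_score(refs, preds, average=None))               # per-class scores: 0.8, 0.0, 0.0
print(round(f1_score(refs, preds, average="macro"), 2))  # 0.27 - unweighted mean of per-class scores
print(round(f1_score(refs, preds, average="micro"), 2))  # 0.33 - 2 of 6 predictions correct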
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """markuplm"""
def __init__( self :Any , lowerCamelCase_ :str=3_05_22 , lowerCamelCase_ :int=7_68 , lowerCamelCase_ :str=12 , lowerCamelCase_ :Union[str, Any]=12 , lowerCamelCase_ :Any=30_72 , lowerCamelCase_ :List[Any]="gelu" , lowerCamelCase_ :int=0.1 , lowerCamelCase_ :Tuple=0.1 , lowerCamelCase_ :Union[str, Any]=5_12 , lowerCamelCase_ :Dict=2 , lowerCamelCase_ :str=0.0_2 , lowerCamelCase_ :Optional[int]=1E-12 , lowerCamelCase_ :Optional[Any]=0 , lowerCamelCase_ :Any=0 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Optional[int]=2_56 , lowerCamelCase_ :Optional[Any]=10_24 , lowerCamelCase_ :Union[str, Any]=2_16 , lowerCamelCase_ :Optional[int]=10_01 , lowerCamelCase_ :str=32 , lowerCamelCase_ :List[str]=50 , lowerCamelCase_ :List[str]="absolute" , lowerCamelCase_ :List[str]=True , lowerCamelCase_ :Union[str, Any]=None , **lowerCamelCase_ :int , ) -> int:
'''simple docstring'''
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Dict = hidden_size
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Dict = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = hidden_act
SCREAMING_SNAKE_CASE : Dict = intermediate_size
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : int = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[Any] = type_vocab_size
SCREAMING_SNAKE_CASE : int = initializer_range
SCREAMING_SNAKE_CASE : str = layer_norm_eps
SCREAMING_SNAKE_CASE : Union[str, Any] = position_embedding_type
SCREAMING_SNAKE_CASE : List[Any] = use_cache
SCREAMING_SNAKE_CASE : Tuple = classifier_dropout
# additional properties
SCREAMING_SNAKE_CASE : List[str] = max_depth
SCREAMING_SNAKE_CASE : Any = max_xpath_tag_unit_embeddings
SCREAMING_SNAKE_CASE : List[str] = max_xpath_subs_unit_embeddings
SCREAMING_SNAKE_CASE : int = tag_pad_id
SCREAMING_SNAKE_CASE : str = subs_pad_id
SCREAMING_SNAKE_CASE : int = xpath_unit_hidden_size
| 18 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling( num : int , den : int )-> bool:
    '''simple docstring'''
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list( digit_len : int )-> list[str]:
    '''simple docstring'''
    solutions = []
    den = 11
    last_digit = int('''1''' + '''0''' * digit_len )
    for num in range(den , last_digit ):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num , den ):
                    solutions.append(F"{num}/{den}" )
            den += 1
        num += 1
        den = 10
    return solutions
def solution( digit_len : int = 2 )-> int:
    '''simple docstring'''
    result = 1.0
    for fraction in fraction_list(digit_len ):
        frac = Fraction(fraction )
        result *= frac.denominator / frac.numerator
    return int(result )
if __name__ == "__main__":
print(solution())
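# Worked example (editor's note): for digit_len == 2 the four non-trivial
# digit-cancelling fractions are 16/64, 19/95, 26/65 and 49/98 (e.g. 49/98 == 4/8
# after "cancelling" the shared digit 9). Their product reduces to 1/100, so
# solution() returns the denominator 100.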
| 18 | 1 |
"""simple docstring"""
def is_isogram( string : str )-> bool:
    '''simple docstring'''
    if not all(x.isalpha() for x in string ):
        raise ValueError('''String must only contain alphabetic characters.''' )
    letters = sorted(string.lower() )
    return len(letters ) == len(set(letters ) )
if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
print(f'''{input_str} is {"an" if isogram else "not an"} isogram.''')
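# Example (editor's note): is_isogram("Uncopyrightable") is True since every letter
# occurs exactly once, while is_isogram("isograms") is False because "s" repeats.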
| 18 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ : int = logging.get_logger(__name__)
class MaskFormerSwinConfig( BackboneConfigMixin , PretrainedConfig ):
'''simple docstring'''
UpperCamelCase = """maskformer-swin"""
UpperCamelCase = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self :Optional[int] , lowerCamelCase_ :List[Any]=2_24 , lowerCamelCase_ :Tuple=4 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :List[str]=96 , lowerCamelCase_ :int=[2, 2, 6, 2] , lowerCamelCase_ :Union[str, Any]=[3, 6, 12, 24] , lowerCamelCase_ :Optional[int]=7 , lowerCamelCase_ :Tuple=4.0 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Dict=0.0 , lowerCamelCase_ :Any=0.0 , lowerCamelCase_ :List[Any]=0.1 , lowerCamelCase_ :Dict="gelu" , lowerCamelCase_ :Optional[int]=False , lowerCamelCase_ :List[str]=0.0_2 , lowerCamelCase_ :Any=1E-5 , lowerCamelCase_ :Union[str, Any]=None , lowerCamelCase_ :List[str]=None , **lowerCamelCase_ :Union[str, Any] , ) -> Dict:
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE : str = num_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = embed_dim
SCREAMING_SNAKE_CASE : List[Any] = depths
SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = num_heads
SCREAMING_SNAKE_CASE : Any = window_size
SCREAMING_SNAKE_CASE : List[str] = mlp_ratio
SCREAMING_SNAKE_CASE : str = qkv_bias
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : int = drop_path_rate
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : Any = use_absolute_embeddings
SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[str] = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE : int = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) )
        self.stage_names = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(lowerCamelCase_ ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names )
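# Usage sketch (editor's note, assumes the `transformers` package): backbone stages
# can be selected by name, and indices are aligned automatically, e.g.
#
#     from transformers import MaskFormerSwinConfig
#     config = MaskFormerSwinConfig(out_features=["stage1", "stage4"])
#     config.out_indices  # -> [1, 4]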
| 18 | 1 |
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_lengths=True , use_token_type_ids=True , use_labels=True , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=2 , vocab_size=99 , n_special=0 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=2 , num_choices=4 , summary_type="last" , use_proj=True , scope=None , bos_token_id=0 , ) -> List[str]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs ( self ) -> Tuple:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            ) # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size] , 2 ).float()
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def get_config ( self :Optional[Any] ) -> int:
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
    def create_and_check_xlm_model ( self :Dict , lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :List[str] , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Any , lowerCamelCase_ :str , ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = XLMModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , lengths=lowerCamelCase_ , langs=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , langs=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_xlm_lm_head ( self :Tuple , lowerCamelCase_ :Any , lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Any , lowerCamelCase_ :List[str] , ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = XLMWithLMHeadModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_xlm_simple_qa ( self :int , lowerCamelCase_ :Any , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Dict , lowerCamelCase_ :Any , lowerCamelCase_ :str , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] , ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = XLMForQuestionAnsweringSimple(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_xlm_qa ( self :Optional[Any] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Any , lowerCamelCase_ :List[str] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[str] , lowerCamelCase_ :str , lowerCamelCase_ :Optional[int] , ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = XLMForQuestionAnswering(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = model(
lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , cls_index=lowerCamelCase_ , is_impossible=lowerCamelCase_ , p_mask=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Tuple = model(
lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , cls_index=lowerCamelCase_ , is_impossible=lowerCamelCase_ , )
        (total_loss,) = result_with_labels.to_tuple()
SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ )
        (total_loss,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
    def create_and_check_xlm_sequence_classif ( self :int , lowerCamelCase_ :int , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :int , lowerCamelCase_ :Any , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Union[str, Any] , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = XLMForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Tuple = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def create_and_check_xlm_token_classif ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :Any , lowerCamelCase_ :List[str] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Any , lowerCamelCase_ :Tuple , ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.num_labels
SCREAMING_SNAKE_CASE : Optional[int] = XLMForTokenClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_xlm_for_multiple_choice ( self :str , lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Optional[Any] , ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.num_choices
SCREAMING_SNAKE_CASE : List[Any] = XLMForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : int = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common ( self :Optional[Any] ) -> int:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class XLMModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
"""feature-extraction""": XLMModel,
"""fill-mask""": XLMWithLMHeadModel,
"""question-answering""": XLMForQuestionAnsweringSimple,
"""text-classification""": XLMForSequenceClassification,
"""text-generation""": XLMWithLMHeadModel,
"""token-classification""": XLMForTokenClassification,
"""zero-shot""": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip ( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> Tuple:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class ( self , inputs_dict , model_class , return_labels=False ) -> Any:
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict['''start_positions'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
                inputs_dict['''end_positions'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp ( self :Dict ) -> int:
        '''simple docstring'''
        self.model_tester = XLMModelTester(self )
        self.config_tester = ConfigTester(self , config_class=XLMConfig , emb_dim=37 )
    def test_config ( self :List[Any] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_xlm_model ( self :Optional[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCamelCase_ )
    def test_xlm_lm_head ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCamelCase_ )
    def test_xlm_simple_qa ( self :Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCamelCase_ )
    def test_xlm_qa ( self :str ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCamelCase_ )
    def test_xlm_sequence_classif ( self :List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCamelCase_ )
    def test_xlm_token_classif ( self :List[str] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCamelCase_ )
    def test_xlm_for_multiple_choice ( self :Any ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCamelCase_ )
    def _check_attentions_for_generate ( self , batch_size , attentions , min_length , max_length , config , use_cache=False , num_beam_groups=1 ) -> Union[str, Any]:
        '''simple docstring'''
        self.assertIsInstance(attentions , tuple )
        self.assertListEqual(
            [isinstance(iter_attentions , tuple ) for iter_attentions in attentions] , [True] * len(attentions ) )
        self.assertEqual(len(attentions ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_attentions in enumerate(attentions ):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(iter_attentions ) )
    def _check_hidden_states_for_generate ( self , batch_size , hidden_states , min_length , max_length , config , use_cache=False , num_beam_groups=1 ) -> Tuple:
        '''simple docstring'''
        self.assertIsInstance(hidden_states , tuple )
        self.assertListEqual(
            [isinstance(iter_hidden_states , tuple ) for iter_hidden_states in hidden_states] , [True] * len(hidden_states ) , )
        self.assertEqual(len(hidden_states ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_hidden_states in enumerate(hidden_states ):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(iter_hidden_states ) , )
@slow
    def test_model_from_pretrained ( self :Optional[int] ) -> Union[str, Any]:
        '''simple docstring'''
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class XLMModelLanguageGenerationTest( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_xlm_mlm_en_2048 ( self :Dict ) -> Dict:
        '''simple docstring'''
        model = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
        model.to(torch_device )
        input_ids = torch.tensor([[14, 4_47]] , dtype=torch.long , device=torch_device ) # the president
        expected_output_ids = [
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].cpu().numpy().tolist() , expected_output_ids )
| 18 |
"""simple docstring"""
import math
class Graph:
    '''simple docstring'''
    def __init__( self , n=0 ) -> None: # a graph with Node 0,1,...,N-1
        '''simple docstring'''
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ] # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ] # dp[i][j] stores minimum distance from i to j
    def add_edge( self , u , v , w ) -> None:
        '''simple docstring'''
        self.dp[u][v] = w
    def floyd_warshall( self ) -> None:
        '''simple docstring'''
        for k in range(0 , self.n ):
            for i in range(0 , self.n ):
                for j in range(0 , self.n ):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
    def show_min( self , u , v ):
        '''simple docstring'''
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
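    # Expected results for the demo above (editor's note, computed by hand):
    #   graph.show_min(1, 4) -> 11  (path 1 -> 3 -> 4, cost 5 + 6)
    #   graph.show_min(0, 3) -> 16  (path 0 -> 2 -> 3, cost 9 + 7)
    # Floyd-Warshall relaxes every pair through every intermediate vertex,
    # so it runs in O(n^3) time and O(n^2) space for n vertices.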
| 18 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {
"naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig( PretrainedConfig ):
'''simple docstring'''
UpperCamelCase = """donut-swin"""
UpperCamelCase = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self :Union[str, Any] , lowerCamelCase_ :int=2_24 , lowerCamelCase_ :Dict=4 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :Any=96 , lowerCamelCase_ :Any=[2, 2, 6, 2] , lowerCamelCase_ :str=[3, 6, 12, 24] , lowerCamelCase_ :Optional[int]=7 , lowerCamelCase_ :str=4.0 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :int=0.0 , lowerCamelCase_ :Any=0.0 , lowerCamelCase_ :int=0.1 , lowerCamelCase_ :Tuple="gelu" , lowerCamelCase_ :Optional[Any]=False , lowerCamelCase_ :Union[str, Any]=0.0_2 , lowerCamelCase_ :int=1E-5 , **lowerCamelCase_ :Any , ) -> Tuple:
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = image_size
SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = embed_dim
SCREAMING_SNAKE_CASE : List[str] = depths
SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = num_heads
SCREAMING_SNAKE_CASE : Optional[Any] = window_size
SCREAMING_SNAKE_CASE : str = mlp_ratio
SCREAMING_SNAKE_CASE : Optional[int] = qkv_bias
SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = drop_path_rate
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : Tuple = use_absolute_embeddings
SCREAMING_SNAKE_CASE : Tuple = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[int] = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE : List[Any] = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) )
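# Worked example (editor's note): with the defaults above, embed_dim = 96 and
# len(depths) = 4, so hidden_size = int(96 * 2 ** 3) = 768 -- the channel width
# after the last Swin stage, which is what VisionEncoderDecoderModel consumes.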
| 18 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
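# Editor's note on the lazy-module pattern above: importing a symbol such as
# MCTCTConfig from this package resolves it on first attribute access, so the
# torch-dependent modeling file is only imported when (and if) torch is available
# and a model class like MCTCTModel is actually requested.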
| 18 | 1 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode( )-> dict:
    '''simple docstring'''
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
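# Example (editor's note): printable bytes map to themselves while the rest are
# shifted into unused code points, e.g. ord("A") -> "A" but the space byte 32
# -> chr(256 + 32) == "Ġ", which is why GPT-2-style vocabularies spell a leading
# space as "Ġ".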
def get_pairs( word : Any )-> set:
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
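# Example (editor's note): get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")} -- the candidate merges the
# BPE loop below ranks and applies.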
class LEDTokenizer( PreTrainedTokenizer ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self :Union[str, Any] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :int="replace" , lowerCamelCase_ :Tuple="<s>" , lowerCamelCase_ :List[str]="</s>" , lowerCamelCase_ :List[str]="</s>" , lowerCamelCase_ :Tuple="<s>" , lowerCamelCase_ :List[str]="<unk>" , lowerCamelCase_ :Dict="<pad>" , lowerCamelCase_ :Any="<mask>" , lowerCamelCase_ :int=False , **lowerCamelCase_ :Union[str, Any] , ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else bos_token
SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else eos_token
SCREAMING_SNAKE_CASE : Dict = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else sep_token
SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else cls_token
SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else unk_token
SCREAMING_SNAKE_CASE : Union[str, Any] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE : int = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token
super().__init__(
errors=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , **lowerCamelCase_ , )
with open(lowerCamelCase_ , encoding='''utf-8''' ) as vocab_handle:
SCREAMING_SNAKE_CASE : Optional[Any] = json.load(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE : str = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE : List[str] = bytes_to_unicode()
SCREAMING_SNAKE_CASE : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase_ , encoding='''utf-8''' ) as merges_handle:
SCREAMING_SNAKE_CASE : List[str] = merges_handle.read().split('''\n''' )[1:-1]
SCREAMING_SNAKE_CASE : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE : Union[str, Any] = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
SCREAMING_SNAKE_CASE : Dict = {}
SCREAMING_SNAKE_CASE : int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE : Dict = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size ( self :Optional[Any] ) -> int:
'''simple docstring'''
return len(self.encoder )
    def get_vocab ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe ( self , token ) -> str:
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ''' '''.join(word )
        self.cache[token] = word
        return word
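    # Worked example (editor's note, hypothetical merge table): if self.bpe_ranks
    # ranks ("l", "l") highest and then ("ll", "o"), bpe("hello") rewrites
    # h e l l o -> h e ll o -> h e llo and returns "h e llo" once no ranked
    # pair remains.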
    def _tokenize ( self , text : str ) -> Tuple:
        '''simple docstring'''
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(''' ''' ) )
        return bpe_tokens
    def _convert_token_to_id ( self , token : Optional[int] ) -> Dict:
        '''simple docstring'''
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token ( self , index : Union[str, Any] ) -> Union[str, Any]:
        '''simple docstring'''
        return self.decoder.get(index )
    def convert_tokens_to_string ( self , tokens : Dict ) -> Union[str, Any]:
        '''simple docstring'''
        text = ''''''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
        return text
    def save_vocabulary ( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        index = 0
        with open(merge_file , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens ( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask ( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences ( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization ( self , text , is_split_into_words=False , **kwargs ) -> Optional[Any]:
        '''simple docstring'''
        add_prefix_space = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ''' ''' + text
        return (text, kwargs)
    def _pad ( self , encoded_inputs : Union[Dict[str, EncodedInput], BatchEncoding] , max_length : Optional[int] = None , padding_strategy : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , ) -> dict:
        '''simple docstring'''
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = '''attention_mask''' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['''global_attention_mask'''] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs['''global_attention_mask'''] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['''global_attention_mask'''] = (
                        encoded_inputs['''global_attention_mask'''] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['''global_attention_mask'''] = [-1] * difference + encoded_inputs[
                        '''global_attention_mask'''
                    ]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
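# Example (editor's note, hypothetical values): right-padding an entry whose
# global_attention_mask is [1, 0, 0] to length 5 yields [1, 0, 0, -1, -1];
# -1 marks padded positions and is deliberately distinct from 0, which means
# "local attention".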
| 18 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel( ModelMixin ):
'''simple docstring'''
    def __init__( self , controlnets : Union[List[ControlNetModel], Tuple[ControlNetModel]] ) -> Union[str, Any]:
        '''simple docstring'''
        super().__init__()
        self.nets = nn.ModuleList(controlnets )
    def forward ( self :Any , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :Union[torch.Tensor, float, int] , lowerCamelCase_ :torch.Tensor , lowerCamelCase_ :List[torch.tensor] , lowerCamelCase_ :List[float] , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[Dict[str, Any]] = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :bool = True , ) -> Union[ControlNetOutput, Tuple]:
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(lowerCamelCase_ , lowerCamelCase_ , self.nets ) ):
            down_samples , mid_sample = controlnet(
                lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
# merge samples
if i == 0:
                down_block_res_samples , mid_block_res_sample = down_samples, mid_sample
else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples , down_samples )
                ]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
    def save_pretrained( self , save_directory : Union[str, os.PathLike] , is_main_process : bool = True , save_function : Callable = None , safe_serialization : bool = False , variant : Optional[str] = None , ) -> Optional[int]:
        '''simple docstring'''
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save , is_main_process=is_main_process , save_function=save_function , safe_serialization=safe_serialization , variant=variant , )
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"
@classmethod
    def from_pretrained( cls , pretrained_model_path : Optional[Union[str, os.PathLike]] , **kwargs ) -> List[Any]:
        '''simple docstring'''
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load ):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load , **kwargs )
            controlnets.append(controlnet )
            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"
logger.info(f"{len(lowerCamelCase_ )} controlnets loaded from {pretrained_model_path}." )
if len(lowerCamelCase_ ) == 0:
raise ValueError(
f"No ControlNets found under {os.path.dirname(lowerCamelCase_ )}. Expected at least {pretrained_model_path + '_0'}." )
return cls(lowerCamelCase_ )
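# Usage sketch (editor's note, assumes two pretrained ControlNet checkpoints):
#
#     controlnet_pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
#     controlnet_canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#     multi = MultiControlNetModel([controlnet_pose, controlnet_canny])
#
# During a pipeline forward pass each net receives its own conditioning image and
# scale, and the residuals are summed as implemented in `forward` above.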
| 18 | 1 |
"""simple docstring"""
import math
def check_partition_perfect( positive_integer : int )-> bool:
    '''simple docstring'''
    exponent = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
    return exponent == int(exponent )
def solution( max_proportion : float = 1 / 1_23_45 )-> int:
    '''simple docstring'''
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate ):
            partition_candidate = int(partition_candidate )
            total_partitions += 1
            if check_partition_perfect(partition_candidate ):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate )
        integer += 1
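# Worked example (editor's note): for integer = 3 the candidate is
# (3**2 - 1) / 4 = 2, and check_partition_perfect(2) evaluates
# log2(sqrt(4 * 2 + 1) / 2 + 1 / 2) = log2(2) = 1, an integer, so 2 is counted
# as a perfect partition number.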
if __name__ == "__main__":
print(f'''{solution() = }''')
| 18 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force( magnitude : float , angle : float , radian_mode : bool = False )-> list[float]:
    '''simple docstring'''
    if radian_mode:
        return [magnitude * cos(angle ), magnitude * sin(angle )]
    return [magnitude * cos(radians(angle ) ), magnitude * sin(radians(angle ) )]
def in_static_equilibrium( forces : NDArray[float64] , location : NDArray[float64] , eps : float = 10**-1 )-> bool:
    '''simple docstring'''
    moments : NDArray[float64] = cross(location , forces )
    sum_moments : float = sum(moments )
    return abs(sum_moments ) < eps
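# Worked example (editor's note): a force [0, -10] applied at [5, 0] yields the
# 2D moment cross([5, 0], [0, -10]) = 5 * (-10) - 0 * 0 = -50 about the origin;
# equilibrium requires the moments of all forces to sum to (approximately) zero.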
if __name__ == "__main__":
# Test to check if it works
    forces = array(
[
polar_force(7_1_8.4, 180 - 30),
polar_force(8_7_9.5_4, 45),
polar_force(100, -90),
]
)
    location : NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
    forces = array(
[
polar_force(30 * 9.8_1, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
    location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 18 | 1 |
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
lowerCamelCase__ : Dict = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent( user_agent : Union[Dict, str, None] = None )-> str:
    '''simple docstring'''
    ua = F"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F"; torch/{_torch_version}"
if is_flax_available():
ua += F"; jax/{_jax_version}"
ua += F"; flax/{_flax_version}"
if is_onnx_available():
ua += F"; onnxruntime/{_onnxruntime_version}"
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''' , '''''' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join(F"{k}/{v}" for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
return ua
def get_full_repo_name( model_id : str , organization : Optional[str] = None , token : Optional[str] = None )-> str:
    '''simple docstring'''
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token )['''name''']
        return F"{username}/{model_id}"
    else:
        return F"{organization}/{model_id}"
def create_model_card( args : Union[str, Any] , model_name : str )-> List[Any]:
'''simple docstring'''
if not is_jinja_available():
raise ValueError(
'''Modelcard rendering is based on Jinja templates.'''
''' Please make sure to have `jinja` installed before using `create_model_card`.'''
''' To install it, please run `pip install Jinja2`.''' )
if hasattr(a_ , '''local_rank''' ) and args.local_rank not in [-1, 0]:
return
    hub_token = args.hub_token if hasattr(args , '''hub_token''' ) else None
    repo_name = get_full_repo_name(model_name , token=hub_token )
    model_card = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='''en''' , license='''apache-2.0''' , library_name='''diffusers''' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=a_ , model_name=a_ , repo_name=a_ , dataset_name=args.dataset_name if hasattr(a_ , '''dataset_name''' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(a_ , '''gradient_accumulation_steps''' ) else None
) , adam_betaa=args.adam_betaa if hasattr(a_ , '''adam_beta1''' ) else None , adam_betaa=args.adam_betaa if hasattr(a_ , '''adam_beta2''' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(a_ , '''adam_weight_decay''' ) else None , adam_epsilon=args.adam_epsilon if hasattr(a_ , '''adam_epsilon''' ) else None , lr_scheduler=args.lr_scheduler if hasattr(a_ , '''lr_scheduler''' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(a_ , '''lr_warmup_steps''' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(a_ , '''ema_inv_gamma''' ) else None , ema_power=args.ema_power if hasattr(a_ , '''ema_power''' ) else None , ema_max_decay=args.ema_max_decay if hasattr(a_ , '''ema_max_decay''' ) else None , mixed_precision=args.mixed_precision , )
    card_path = os.path.join(args.output_dir , '''README.md''' )
    model_card.save(card_path )
def extract_commit_hash( resolved_file : Optional[str] , commit_hash : Optional[str] = None )-> Optional[str]:
    '''simple docstring'''
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file ).as_posix() )
    search = re.search(r'''snapshots/([^/]+)/''' , resolved_file )
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash ) else None
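# Example (editor's note, hypothetical path): for a cached file like
# ".../snapshots/0123456789abcdef0123456789abcdef01234567/config.json" the regex
# captures the 40-hex-character snapshot folder name, which passes the
# REGEX_COMMIT_HASH check and is returned as the commit hash.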
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache( old_cache_dir : Optional[str] = None , new_cache_dir : Optional[str] = None )-> None:
    '''simple docstring'''
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir ).expanduser()
    new_cache_dir = Path(new_cache_dir ).expanduser()
    for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir )
            new_blob_path.parent.mkdir(parents=True , exist_ok=True )
            os.replace(old_blob_path , new_blob_path )
            try:
                os.symlink(new_blob_path , old_blob_path )
            except OSError:
                logger.warning(
                    '''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
lowerCamelCase__ : int = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
with open(cache_version_file) as f:
try:
            cache_version = int(f.read())
except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
"existing cached models. This is a one-time operation, you can interrupt it or run it "
"later by calling `diffusers.utils.hub_utils.move_cache()`."
)
try:
move_cache()
except Exception as e:
lowerCamelCase__ : List[Any] = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
"file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
"message and we will do our best to help."
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, "w") as f:
f.write("1")
except Exception:
logger.warning(
f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
"the directory exists and can be written to."
)
def _add_variant( weights_name : str , variant : Optional[str] = None )-> str:
    '''simple docstring'''
    if variant is not None:
        splits = weights_name.split('''.''' )
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '''.'''.join(splits )
return weights_name
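# Example (editor's note): _add_variant("diffusion_pytorch_model.bin", "fp16")
# returns "diffusion_pytorch_model.fp16.bin" -- the variant is spliced in just
# before the final extension.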
def _get_model_file( pretrained_model_name_or_path , *,
    weights_name , subfolder , cache_dir , force_download , proxies , resume_download , local_files_only , use_auth_token , user_agent , revision , commit_hash=None , )-> List[str]:
    '''simple docstring'''
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    if os.path.isfile(pretrained_model_name_or_path ):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path ):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path , weights_name ) ):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path , weights_name )
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path , subfolder , weights_name ) ):
            model_file = os.path.join(pretrained_model_name_or_path , subfolder , weights_name )
            return model_file
        else:
            raise EnvironmentError(
                F"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__ ).base_version ) >= version.parse('''0.20.0''' )
):
try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path , filename=_add_variant(weights_name , revision ) , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
                warnings.warn(
                    F"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead." , FutureWarning , )
                return model_file
except: # noqa: E722
warnings.warn(
F"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(a_ , a_ )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(a_ , a_ )}' so that the correct variant file can be added." , a_ , )
try:
# 2. Load model file as usual
SCREAMING_SNAKE_CASE : Optional[Any] = hf_hub_download(
a_ , filename=a_ , cache_dir=a_ , force_download=a_ , proxies=a_ , resume_download=a_ , local_files_only=a_ , use_auth_token=a_ , user_agent=a_ , subfolder=a_ , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
F"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
'''this model name. Check the model page at '''
F"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." )
except EntryNotFoundError:
raise EnvironmentError(
F"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." )
except HTTPError as err:
raise EnvironmentError(
F"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" )
except ValueError:
raise EnvironmentError(
F"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
F" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
F" directory containing a file named {weights_name} or"
                ''' \nCheck your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
F"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
F"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
F"containing a file named {weights_name}" )
| 18 |
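The _add_variant helper in the block above splices a variant tag between a weights filename's stem and its extension. A minimal runnable sketch of the same logic, with readable names of my own choosing (the sample's identifiers are mangled):

from typing import Optional

def add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    # "diffusion_pytorch_model.bin" + "fp16" -> "diffusion_pytorch_model.fp16.bin"
    if variant is not None:
        splits = weights_name.split(".")
        weights_name = ".".join(splits[:-1] + [variant] + splits[-1:])
    return weights_name

assert add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"
assert add_variant("model.safetensors") == "model.safetensors"  # no variant: unchanged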
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. A bigger population could be faster but uses more memory.
lowerCamelCase__ : Optional[Any] = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCamelCase__ : Optional[int] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase__ : Optional[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def __A ( a_ : str , a_ : str )-> tuple[str, float]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = len([g for position, g in enumerate(a_ ) if g == main_target[position]] )
return (item, float(a_ ))
def __A ( a_ : str , a_ : str )-> tuple[str, str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = random.randint(0 , len(a_ ) - 1 )
SCREAMING_SNAKE_CASE : str = parent_a[:random_slice] + parent_a[random_slice:]
SCREAMING_SNAKE_CASE : Dict = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def __A ( a_ : str , a_ : list[str] )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = list(a_ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
SCREAMING_SNAKE_CASE : Any = random.choice(a_ )
return "".join(a_ )
def __A ( a_ : tuple[str, float] , a_ : list[tuple[str, float]] , a_ : list[str] , )-> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = []
# Generate more children proportionally to the fitness score.
SCREAMING_SNAKE_CASE : List[str] = int(parent_a[1] * 1_00 ) + 1
SCREAMING_SNAKE_CASE : Optional[Any] = 10 if child_n >= 10 else child_n
for _ in range(a_ ):
SCREAMING_SNAKE_CASE : List[str] = population_score[random.randint(0 , a_ )][0]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = crossover(parent_a[0] , a_ )
# Append new string to the population list.
pop.append(mutate(a_ , a_ ) )
pop.append(mutate(a_ , a_ ) )
return pop
def __A ( a_ : str , a_ : list[str] , a_ : bool = True )-> tuple[int, int, str]:
'''simple docstring'''
if N_POPULATION < N_SELECTED:
SCREAMING_SNAKE_CASE : List[Any] = F"{N_POPULATION} must be bigger than {N_SELECTED}"
raise ValueError(a_ )
    # Verify that the target contains no genes besides the ones inside the genes variable.
SCREAMING_SNAKE_CASE : List[str] = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
SCREAMING_SNAKE_CASE : str = F"{not_in_genes_list} is not in genes list, evolution cannot converge"
raise ValueError(a_ )
# Generate random starting population.
SCREAMING_SNAKE_CASE : Tuple = []
for _ in range(a_ ):
population.append(''''''.join([random.choice(a_ ) for i in range(len(a_ ) )] ) )
    # Just some logs to know what the algorithm is doing.
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(a_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
SCREAMING_SNAKE_CASE : int = [evaluate(a_ , a_ ) for item in population]
# Check if there is a matching evolution.
SCREAMING_SNAKE_CASE : List[Any] = sorted(a_ , key=lambda a_ : x[1] , reverse=a_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations,
        # just to confirm that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"\nGeneration: {generation}"
F"\nTotal Population:{total_population}"
F"\nBest score: {population_score[0][1]}"
F"\nBest string: {population_score[0][0]}" )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping these avoids regression of the evolution.
SCREAMING_SNAKE_CASE : Optional[Any] = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(a_ )
# Normalize population score to be between 0 and 1.
SCREAMING_SNAKE_CASE : Optional[int] = [
(item, score / len(a_ )) for item, score in population_score
]
        # This is the selection step.
for i in range(a_ ):
population.extend(select(population_score[int(a_ )] , a_ , a_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
        # forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(a_ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCamelCase__ : Dict = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCamelCase__ : int = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 18 | 1 |
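The three genetic operators in the block above reduce to the sketch below. Names are illustrative, and the in-place gene overwrite in mutate (dropped by the identifier mangling in the sample) is restored here:

from __future__ import annotations

import random

def evaluate(item: str, main_target: str) -> tuple[str, float]:
    # Fitness = number of characters already matching the target position-wise.
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))

def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    # Single-point crossover: the children swap tails at a random cut position.
    cut = random.randint(0, len(parent_1) - 1)
    return (parent_1[:cut] + parent_2[cut:], parent_2[:cut] + parent_1[cut:])

def mutate(child: str, genes: list[str], mutation_probability: float = 0.4) -> str:
    # With probability mutation_probability, overwrite one randomly chosen gene.
    child_list = list(child)
    if random.uniform(0, 1) < mutation_probability:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)

assert evaluate("hallo", "hello")[1] == 4.0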
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCamelCase__ : Dict = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """layoutlmv3"""
def __init__( self :str , lowerCamelCase_ :Optional[Any]=5_02_65 , lowerCamelCase_ :Dict=7_68 , lowerCamelCase_ :Union[str, Any]=12 , lowerCamelCase_ :Optional[Any]=12 , lowerCamelCase_ :Union[str, Any]=30_72 , lowerCamelCase_ :Any="gelu" , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Any=5_12 , lowerCamelCase_ :int=2 , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :Optional[int]=1E-5 , lowerCamelCase_ :Dict=1 , lowerCamelCase_ :int=0 , lowerCamelCase_ :Tuple=2 , lowerCamelCase_ :List[str]=10_24 , lowerCamelCase_ :Tuple=1_28 , lowerCamelCase_ :Any=1_28 , lowerCamelCase_ :Optional[Any]=True , lowerCamelCase_ :str=32 , lowerCamelCase_ :int=1_28 , lowerCamelCase_ :int=64 , lowerCamelCase_ :List[Any]=2_56 , lowerCamelCase_ :Any=True , lowerCamelCase_ :str=True , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :List[str]=2_24 , lowerCamelCase_ :Dict=3 , lowerCamelCase_ :Union[str, Any]=16 , lowerCamelCase_ :Any=None , **lowerCamelCase_ :Optional[Any] , ) -> int:
'''simple docstring'''
super().__init__(
vocab_size=lowerCamelCase_ , hidden_size=lowerCamelCase_ , num_hidden_layers=lowerCamelCase_ , num_attention_heads=lowerCamelCase_ , intermediate_size=lowerCamelCase_ , hidden_act=lowerCamelCase_ , hidden_dropout_prob=lowerCamelCase_ , attention_probs_dropout_prob=lowerCamelCase_ , max_position_embeddings=lowerCamelCase_ , type_vocab_size=lowerCamelCase_ , initializer_range=lowerCamelCase_ , layer_norm_eps=lowerCamelCase_ , pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Optional[Any] = max_ad_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = coordinate_size
SCREAMING_SNAKE_CASE : Tuple = shape_size
SCREAMING_SNAKE_CASE : Optional[int] = has_relative_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_pos_bins
SCREAMING_SNAKE_CASE : int = max_rel_pos
SCREAMING_SNAKE_CASE : Any = has_spatial_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_ad_pos_bins
SCREAMING_SNAKE_CASE : Dict = max_rel_ad_pos
SCREAMING_SNAKE_CASE : Optional[int] = text_embed
SCREAMING_SNAKE_CASE : Any = visual_embed
SCREAMING_SNAKE_CASE : Any = input_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : List[str] = patch_size
SCREAMING_SNAKE_CASE : str = classifier_dropout
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = version.parse("""1.12""" )
@property
def __lowerCAmelCase ( self :List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def __lowerCAmelCase ( self :Optional[int] ) -> float:
'''simple docstring'''
return 1E-5
@property
def __lowerCAmelCase ( self :Tuple ) -> int:
'''simple docstring'''
return 12
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :"ProcessorMixin" , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional["TensorType"] = None , lowerCamelCase_ :int = 3 , lowerCamelCase_ :int = 40 , lowerCamelCase_ :int = 40 , ) -> Mapping[str, Any]:
'''simple docstring'''
setattr(processor.image_processor , '''apply_ocr''' , lowerCamelCase_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Dict = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Union[str, Any] = processor.tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Union[str, Any] = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
SCREAMING_SNAKE_CASE : int = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_images(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = dict(
processor(
lowerCamelCase_ , text=lowerCamelCase_ , boxes=lowerCamelCase_ , return_tensors=lowerCamelCase_ , ) )
return inputs
| 18 |
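The dummy-input generator above pins dynamic axes with compute_effective_axis_dimension. Its behaviour is roughly the simplification below (a sketch, not the library source verbatim):

def compute_effective_axis_dimension(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int:
    # A dynamic axis (-1) is replaced by a fixed size so ONNX cannot specialize on it,
    # and room is then reserved for the tokenizer's special tokens.
    if dimension <= 0:
        dimension = fixed_dimension
    return dimension - num_token_to_add

assert compute_effective_axis_dimension(-1, fixed_dimension=2) == 2                      # batch axis
assert compute_effective_axis_dimension(-1, fixed_dimension=8, num_token_to_add=2) == 6  # sequence axis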
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
lowerCamelCase__ : Optional[Any] = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __A ( a_ : Optional[int] )-> Dict:
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def __A ( a_ : List[Any] , a_ : Optional[int] , a_ : Optional[int] )-> Dict:
'''simple docstring'''
return max(metric_fn(a_ , a_ ) for gt in ground_truths )
def __A ( a_ : List[Any] , a_ : Union[str, Any] , a_ : str )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Optional[Any] = []
if args.gold_data_mode == "qa":
SCREAMING_SNAKE_CASE : List[Any] = pd.read_csv(a_ , sep='''\t''' , header=a_ )
for answer_list in data[1]:
SCREAMING_SNAKE_CASE : str = ast.literal_eval(a_ )
answers.append(a_ )
else:
SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = [[reference] for reference in references]
SCREAMING_SNAKE_CASE : Dict = 0
for prediction, ground_truths in zip(a_ , a_ ):
total += 1
em += metric_max_over_ground_truths(a_ , a_ , a_ )
fa += metric_max_over_ground_truths(a_ , a_ , a_ )
SCREAMING_SNAKE_CASE : Any = 100.0 * em / total
SCREAMING_SNAKE_CASE : Optional[int] = 100.0 * fa / total
logger.info(F"F1: {fa:.2f}" )
logger.info(F"EM: {em:.2f}" )
def __A ( a_ : Any , a_ : Any , a_ : List[Any] )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = args.k
SCREAMING_SNAKE_CASE : Tuple = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Union[str, Any] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = 0
for hypo, reference in zip(a_ , a_ ):
SCREAMING_SNAKE_CASE : Optional[int] = set(hypo.split('''\t''' )[:k] )
SCREAMING_SNAKE_CASE : List[str] = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
SCREAMING_SNAKE_CASE : Dict = 100.0 * em / total
logger.info(F"Precision@{k}: {em: .2f}" )
def __A ( a_ : Any , a_ : List[str] , a_ : str )-> int:
'''simple docstring'''
def strip_title(a_ : Optional[Any] ):
if title.startswith('''"''' ):
SCREAMING_SNAKE_CASE : Tuple = title[1:]
if title.endswith('''"''' ):
SCREAMING_SNAKE_CASE : Any = title[:-1]
return title
SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ , )['''input_ids'''].to(args.device )
SCREAMING_SNAKE_CASE : Any = rag_model.rag.question_encoder(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = question_enc_outputs[0]
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever(
a_ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE : Any = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
SCREAMING_SNAKE_CASE : Dict = []
for docs in all_docs:
SCREAMING_SNAKE_CASE : List[Any] = [strip_title(a_ ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(a_ ) )
return provenance_strings
def __A ( a_ : List[Any] , a_ : int , a_ : str )-> Tuple:
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ )
SCREAMING_SNAKE_CASE : Dict = inputs_dict.input_ids.to(args.device )
SCREAMING_SNAKE_CASE : Any = inputs_dict.attention_mask.to(args.device )
SCREAMING_SNAKE_CASE : Tuple = rag_model.generate( # rag_model overwrites generate
a_ , attention_mask=a_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=a_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.generator_tokenizer.batch_decode(a_ , skip_special_tokens=a_ )
if args.print_predictions:
for q, a in zip(a_ , a_ ):
logger.info('''Q: {} - A: {}'''.format(a_ , a_ ) )
return answers
def __A ( )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=a_ , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=a_ , choices=['''exact''', '''compressed''', '''legacy'''] , type=a_ , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=a_ , type=a_ , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=a_ , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=a_ , type=a_ , required=a_ , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=a_ , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=a_ , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=a_ , type=a_ , required=a_ , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=a_ , type=a_ , required=a_ , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=a_ , choices=['''qa''', '''ans'''] , help=(
            '''Format of the gold data file.\n'''
            '''qa - a single line in the following format: question [tab] answer_list\n'''
            '''ans - a single line of the gold file contains the expected answer string'''
) , )
parser.add_argument(
'''--predictions_path''' , type=a_ , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
        '''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=a_ , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=a_ , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=a_ , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=a_ , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
        '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retrieved while generating.''' , )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
SCREAMING_SNAKE_CASE : Dict = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def __A ( a_ : Optional[Any] )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = {}
if args.model_type is None:
SCREAMING_SNAKE_CASE : List[str] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : List[str] = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs
if args.index_name is not None:
SCREAMING_SNAKE_CASE : Tuple = args.index_name
if args.index_path is not None:
SCREAMING_SNAKE_CASE : List[Any] = args.index_path
else:
SCREAMING_SNAKE_CASE : str = BartForConditionalGeneration
SCREAMING_SNAKE_CASE : Optional[int] = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , a_ )
SCREAMING_SNAKE_CASE : int = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
SCREAMING_SNAKE_CASE : str = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(a_ , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(a_ ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : Dict = RagRetriever.from_pretrained(a_ , **a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class.from_pretrained(a_ , retriever=a_ , **a_ )
model.retriever.init_retrieval()
else:
SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(a_ , **a_ )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
SCREAMING_SNAKE_CASE : Dict = []
for line in tqdm(a_ ):
questions.append(line.strip() )
if len(a_ ) == args.eval_batch_size:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) + '''\n''' )
preds_file.flush()
SCREAMING_SNAKE_CASE : Union[str, Any] = []
if len(a_ ) > 0:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) )
preds_file.flush()
score_fn(a_ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
lowerCamelCase__ : List[str] = get_args()
main(args)
| 18 | 1 |
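The e2e scoring path above boils down to crediting each prediction against its most favourable gold answer. A self-contained sketch with a toy exact-match metric (names are mine, not the script's):

def exact_match(prediction: str, ground_truth: str) -> float:
    return float(prediction.strip().lower() == ground_truth.strip().lower())

def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    # Take the best metric value over all gold answers.
    return max(metric_fn(prediction, gt) for gt in ground_truths)

assert metric_max_over_ground_truths(exact_match, "Paris", ["paris", "Lyon"]) == 1.0
assert metric_max_over_ground_truths(exact_match, "Nice", ["paris", "Lyon"]) == 0.0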
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase__ : Optional[int] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
lowerCamelCase__ : str = 250004
lowerCamelCase__ : Dict = 250020
@require_sentencepiece
@require_tokenizers
class lowercase__( _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = MBartaaTokenizer
UpperCamelCase = MBartaaTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def __lowerCAmelCase ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE : Optional[Any] = MBartaaTokenizer(lowerCamelCase_ , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=lowerCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self :List[str] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''<s>'''
SCREAMING_SNAKE_CASE : Dict = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(lowerCamelCase_ ) , 10_54 )
def __lowerCAmelCase ( self :Tuple ) -> str:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_54 )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = MBartaaTokenizer(lowerCamelCase_ , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCamelCase_ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def __lowerCAmelCase ( self :Union[str, Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = {'''input_ids''': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
SCREAMING_SNAKE_CASE : Optional[int] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : int = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : List[str] = tokenizer_r.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = tokenizer_p.save_pretrained(lowerCamelCase_ )
                # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
SCREAMING_SNAKE_CASE : Optional[int] = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : Dict = tokenizer_r.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=True
SCREAMING_SNAKE_CASE : List[Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : str = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = tokenizer_p.save_pretrained(lowerCamelCase_ )
                # Checks it saves with the same files
self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : int = tokenizer_r.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
# Save tokenizer rust, legacy_format=False
SCREAMING_SNAKE_CASE : Optional[int] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = tokenizer_p.save_pretrained(lowerCamelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.from_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_p.from_pretrained(lowerCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
shutil.rmtree(lowerCamelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase__( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = """facebook/mbart-large-50-one-to-many-mmt"""
UpperCamelCase = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
UpperCamelCase = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
UpperCamelCase = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2]
@classmethod
def __lowerCAmelCase ( cls :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
SCREAMING_SNAKE_CASE : Optional[int] = 1
return cls
def __lowerCAmelCase ( self :str ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_00_20 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 25_00_38 )
def __lowerCAmelCase ( self :Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ )
def __lowerCAmelCase ( self :Dict ) -> Any:
'''simple docstring'''
self.assertIn(lowerCamelCase_ , self.tokenizer.all_special_ids )
SCREAMING_SNAKE_CASE : Any = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertNotIn(self.tokenizer.eos_token , lowerCamelCase_ )
def __lowerCAmelCase ( self :List[str] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = 10
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ ).input_ids[0]
self.assertEqual(ids[0] , lowerCamelCase_ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Dict:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_00_53, 25_00_01] )
def __lowerCAmelCase ( self :List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = MBartaaTokenizer.from_pretrained(lowerCamelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCamelCase_ )
@require_torch
def __lowerCAmelCase ( self :List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Tuple = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def __lowerCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE : List[str] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE : int = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __lowerCAmelCase ( self :int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(self.src_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=3 , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(
text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=10 , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Optional[int] = targets['''input_ids''']
SCREAMING_SNAKE_CASE : Any = shift_tokens_right(lowerCamelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __lowerCAmelCase ( self :List[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , {
# en_XX, A, test, EOS
'''input_ids''': [[25_00_04, 62, 30_34, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_00_01,
} , )
| 18 |
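The decoder_input_ids asserted above come from mBART's shift_tokens_right, which wraps each row's final non-pad token (eos) around to position 0. A NumPy approximation of that behaviour (the library version is torch-based and also remaps -100 labels, so treat this as a sketch):

import numpy as np

def shift_tokens_right(input_ids: np.ndarray, pad_token_id: int) -> np.ndarray:
    shifted = input_ids.copy()
    last = (input_ids != pad_token_id).sum(axis=1) - 1          # index of eos in each row
    shifted[:, 1:] = input_ids[:, :-1]                          # shift everything right
    shifted[:, 0] = input_ids[np.arange(len(input_ids)), last]  # eos becomes the start token
    return shifted

labels = np.array([[250020, 884, 9019, 2]])                     # [RO_CODE, tokens..., eos]
assert shift_tokens_right(labels, pad_token_id=1)[0, :2].tolist() == [2, 250020]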
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {"vocab_file": "vocab.json"}
lowerCamelCase__ : Dict = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
lowerCamelCase__ : Optional[Any] = {"mgp-str": 27}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[int]="[GO]" , lowerCamelCase_ :int="[GO]" , lowerCamelCase_ :str="[s]" , lowerCamelCase_ :Dict="[GO]" , **lowerCamelCase_ :List[str] ) -> Tuple:
'''simple docstring'''
super().__init__(
unk_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , **lowerCamelCase_ , )
with open(lowerCamelCase_ , encoding='''utf-8''' ) as vocab_handle:
SCREAMING_SNAKE_CASE : int = json.load(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in self.vocab.items()}
@property
def __lowerCAmelCase ( self :int ) -> Dict:
'''simple docstring'''
return len(self.vocab )
def __lowerCAmelCase ( self :List[str] ) -> Dict:
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Optional[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for s in text:
char_tokens.extend(lowerCamelCase_ )
return char_tokens
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Tuple ) -> Optional[int]:
'''simple docstring'''
return self.vocab.get(lowerCamelCase_ , self.vocab.get(self.unk_token ) )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Dict ) -> Optional[int]:
'''simple docstring'''
return self.decoder.get(lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(lowerCamelCase_ ) )
return
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + '''\n''' )
return (vocab_file,)
| 18 | 1 |
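The _tokenize loop in the block above lost its loop variable to the identifier mangling; the intent is one token per character, as in this readable sketch:

def char_tokenize(text: str) -> list:
    char_tokens = []
    for s in text:
        char_tokens.extend(s)  # each character becomes its own token
    return char_tokens

assert char_tokenize("mgp") == ["m", "g", "p"]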
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def __A ( a_ : float , a_ : float , a_ : bool = False )-> list[float]:
'''simple docstring'''
if radian_mode:
return [magnitude * cos(a_ ), magnitude * sin(a_ )]
return [magnitude * cos(radians(a_ ) ), magnitude * sin(radians(a_ ) )]
def __A ( a_ : NDArray[floataa] , a_ : NDArray[floataa] , a_ : float = 10**-1 )-> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE : NDArray[floataa] = cross(a_ , a_ )
SCREAMING_SNAKE_CASE : float = sum(a_ )
return abs(a_ ) < eps
if __name__ == "__main__":
# Test to check if it works
lowerCamelCase__ : Optional[Any] = array(
[
polar_force(7_1_8.4, 180 - 30),
polar_force(8_7_9.5_4, 45),
polar_force(100, -90),
]
)
lowerCamelCase__ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
lowerCamelCase__ : Union[str, Any] = array(
[
polar_force(30 * 9.8_1, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
lowerCamelCase__ : Any = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
lowerCamelCase__ : Union[str, Any] = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
lowerCamelCase__ : Optional[int] = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 18 |
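The equilibrium check above is the statement that the net moment sum(r_i x F_i) vanishes. A hand-rolled example in the same spirit (toy numbers of my own):

import numpy as np

# Equal and opposite forces acting along the same line of action produce no net moment.
forces = np.array([[1.0, 0.0], [-1.0, 0.0]])
location = np.array([[1.0, 0.0], [-1.0, 0.0]])
moments = np.cross(location, forces)  # z-component of each r x F
assert abs(moments.sum()) < 10**-1    # net moment ~ 0, so the system is in equilibrium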
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """luke"""
def __init__( self :Optional[int] , lowerCamelCase_ :Union[str, Any]=5_02_67 , lowerCamelCase_ :int=50_00_00 , lowerCamelCase_ :Tuple=7_68 , lowerCamelCase_ :List[str]=2_56 , lowerCamelCase_ :Dict=12 , lowerCamelCase_ :Optional[int]=12 , lowerCamelCase_ :Optional[Any]=30_72 , lowerCamelCase_ :List[Any]="gelu" , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :List[str]=5_12 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Tuple=0.0_2 , lowerCamelCase_ :Optional[int]=1E-12 , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :List[str]=None , lowerCamelCase_ :str=1 , lowerCamelCase_ :Any=0 , lowerCamelCase_ :str=2 , **lowerCamelCase_ :List[Any] , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : List[str] = entity_vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = entity_emb_size
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[int] = use_entity_aware_attention
SCREAMING_SNAKE_CASE : List[str] = classifier_dropout
| 18 | 1 |
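Assuming the class above mirrors transformers.LukeConfig (the checkpoint URLs suggest it does), a hypothetical instantiation looks like the following; the keyword names follow the attributes assigned in __init__ and are not verified against the released API:

from transformers import LukeConfig

config = LukeConfig(vocab_size=50267, entity_vocab_size=500000, entity_emb_size=256)
assert config.entity_emb_size == 256
assert config.use_entity_aware_attention  # defaults to True, as in the signature above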
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {"vocab_file": "vocab.json"}
lowerCamelCase__ : Dict = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
lowerCamelCase__ : Optional[Any] = {"mgp-str": 27}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[int]="[GO]" , lowerCamelCase_ :int="[GO]" , lowerCamelCase_ :str="[s]" , lowerCamelCase_ :Dict="[GO]" , **lowerCamelCase_ :List[str] ) -> Tuple:
'''simple docstring'''
super().__init__(
unk_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , **lowerCamelCase_ , )
with open(lowerCamelCase_ , encoding='''utf-8''' ) as vocab_handle:
SCREAMING_SNAKE_CASE : int = json.load(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in self.vocab.items()}
@property
def __lowerCAmelCase ( self :int ) -> Dict:
'''simple docstring'''
return len(self.vocab )
def __lowerCAmelCase ( self :List[str] ) -> Dict:
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Optional[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for s in text:
char_tokens.extend(lowerCamelCase_ )
return char_tokens
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Tuple ) -> Optional[int]:
'''simple docstring'''
return self.vocab.get(lowerCamelCase_ , self.vocab.get(self.unk_token ) )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Dict ) -> Optional[int]:
'''simple docstring'''
return self.decoder.get(lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(lowerCamelCase_ ) )
return
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + '''\n''' )
return (vocab_file,)
| 18 |
"""simple docstring"""
def __A ( a_ : list , a_ : int , a_ : int = 0 , a_ : int = 0 )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = right or len(a_ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(a_ , a_ , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 1 |
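A readable, runnable rendering of the two-ended recursive linear search above (parameter names are mine):

def search(list_data: list, key, left: int = 0, right: int = 0) -> int:
    right = right or len(list_data) - 1  # default to the last index on the first call
    if left > right:
        return -1                        # the pointers crossed: key is absent
    if list_data[left] == key:
        return left
    if list_data[right] == key:
        return right
    return search(list_data, key, left + 1, right - 1)

assert search([1, 2, 4, 8, 16], 8) == 3
assert search([1, 2, 4, 8, 16], 5) == -1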
"""simple docstring"""
import math
def __A ( a_ : list , a_ : int )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = len(a_ )
SCREAMING_SNAKE_CASE : Optional[Any] = int(math.floor(math.sqrt(a_ ) ) )
SCREAMING_SNAKE_CASE : List[str] = 0
while arr[min(a_ , a_ ) - 1] < x:
SCREAMING_SNAKE_CASE : Optional[Any] = step
step += int(math.floor(math.sqrt(a_ ) ) )
if prev >= n:
return -1
while arr[prev] < x:
SCREAMING_SNAKE_CASE : Any = prev + 1
if prev == min(a_ , a_ ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
lowerCamelCase__ : Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
lowerCamelCase__ : List[str] = [int(item) for item in user_input.split(",")]
lowerCamelCase__ : Dict = int(input("Enter the number to be searched:\n"))
lowerCamelCase__ : Tuple = jump_search(arr, x)
if res == -1:
print("Number not found!")
else:
print(f'''Number {x} is at index {res}''')
| 18 |
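The jump search above, rewritten with descriptive names so the sqrt(n) block-skipping is visible (same logic, different identifiers; the input must be sorted):

import math

def jump_search(arr: list, x) -> int:
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:   # jump over whole blocks of ~sqrt(n) elements
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:               # linear scan inside the block that may hold x
        prev += 1
        if prev == min(step, n):
            return -1
    return prev if arr[prev] == x else -1

assert jump_search([0, 1, 2, 3, 5, 8, 13], 5) == 4
assert jump_search([0, 1, 2, 3, 5, 8, 13], 7) == -1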
"""simple docstring"""
def __A ( a_ : int )-> list[int]:
'''simple docstring'''
if num <= 0:
raise ValueError('''Input must be a positive integer''' )
SCREAMING_SNAKE_CASE : Optional[int] = [True] * (num + 1)
SCREAMING_SNAKE_CASE : Optional[Any] = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , a_ ):
SCREAMING_SNAKE_CASE : Any = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ : str = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 18 | 1 |
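The sieve above lost its subscript write (primes[i] = False) to the identifier mangling; this clean version restores it:

def prime_sieve_eratosthenes(num: int) -> list:
    # Cross off the multiples of each prime p, starting at p * p.
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]

assert prime_sieve_eratosthenes(20) == [2, 3, 5, 7, 11, 13, 17, 19]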
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
    # The slow tests often fail with an OOM error on GPU.
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowerCamelCase__ : List[str] = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def __A ( a_ : Dict , a_ : List[str] , a_ : Optional[Any]=None , a_ : Optional[Any]=None , a_ : List[Any]=None , a_ : Tuple=None , a_ : List[Any]=None , a_ : Optional[int]=None , )-> Dict:
'''simple docstring'''
if attention_mask is None:
SCREAMING_SNAKE_CASE : List[Any] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE : List[str] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE : Tuple = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class lowercase__:
'''simple docstring'''
def __init__( self :str , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Tuple=13 , lowerCamelCase_ :List[str]=7 , lowerCamelCase_ :int=True , lowerCamelCase_ :int=False , lowerCamelCase_ :List[Any]=99 , lowerCamelCase_ :Optional[int]=16 , lowerCamelCase_ :List[Any]=2 , lowerCamelCase_ :List[Any]=4 , lowerCamelCase_ :Dict=4 , lowerCamelCase_ :Optional[int]="gelu" , lowerCamelCase_ :Dict=0.1 , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Any=32 , lowerCamelCase_ :Tuple=2 , lowerCamelCase_ :List[Any]=1 , lowerCamelCase_ :List[str]=0 , lowerCamelCase_ :Any=0.0_2 , ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = parent
SCREAMING_SNAKE_CASE : Dict = batch_size
SCREAMING_SNAKE_CASE : Tuple = seq_length
SCREAMING_SNAKE_CASE : Dict = is_training
SCREAMING_SNAKE_CASE : List[str] = use_labels
SCREAMING_SNAKE_CASE : Tuple = vocab_size
SCREAMING_SNAKE_CASE : int = hidden_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Any = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : Dict = hidden_act
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = eos_token_id
SCREAMING_SNAKE_CASE : List[Any] = pad_token_id
SCREAMING_SNAKE_CASE : Dict = bos_token_id
SCREAMING_SNAKE_CASE : Dict = initializer_range
def __lowerCAmelCase ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
SCREAMING_SNAKE_CASE : Optional[int] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
SCREAMING_SNAKE_CASE : List[Any] = shift_tokens_right(lowerCamelCase_ , 1 , 2 )
SCREAMING_SNAKE_CASE : Any = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : int = prepare_blenderbot_inputs_dict(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return config, inputs_dict
def __lowerCAmelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 20
SCREAMING_SNAKE_CASE : Optional[Any] = model_class_name(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = model.encode(inputs_dict['''input_ids'''] )
decoder_input_ids, decoder_attention_mask = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
SCREAMING_SNAKE_CASE : Dict = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
SCREAMING_SNAKE_CASE : str = model.decode(
decoder_input_ids[:, :-1] , lowerCamelCase_ , decoder_attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , decoder_position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
SCREAMING_SNAKE_CASE : List[Any] = model.decode(
decoder_input_ids[:, -1:] , lowerCamelCase_ , decoder_attention_mask=lowerCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Dict = model.decode(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :List[Any] , lowerCamelCase_ :int , lowerCamelCase_ :Union[str, Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = 20
SCREAMING_SNAKE_CASE : Tuple = model_class_name(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = model.encode(inputs_dict['''input_ids'''] )
decoder_input_ids, decoder_attention_mask = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
SCREAMING_SNAKE_CASE : Optional[int] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
SCREAMING_SNAKE_CASE : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
SCREAMING_SNAKE_CASE : Optional[Any] = model.decode(
decoder_input_ids[:, :-1] , lowerCamelCase_ , decoder_attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , decoder_position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
SCREAMING_SNAKE_CASE : Dict = model.decode(
decoder_input_ids[:, -1:] , lowerCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCamelCase_ , decoder_position_ids=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : int = model.decode(lowerCamelCase_ , lowerCamelCase_ , decoder_attention_mask=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
@require_flax
class lowercase__( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = 99
def __lowerCAmelCase ( self :Any ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.int64 , )
SCREAMING_SNAKE_CASE : Optional[Any] = input_ids.shape[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def __lowerCAmelCase ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
config, input_ids, batch_size = self._get_config_and_data()
lm_model = FlaxBlenderbotSmallForConditionalGeneration(config )
outputs = lm_model(input_ids=input_ids )
expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , expected_shape )
def __lowerCAmelCase ( self :Union[str, Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
SCREAMING_SNAKE_CASE : int = FlaxBlenderbotSmallForConditionalGeneration(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.int64 )
SCREAMING_SNAKE_CASE : str = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.int64 )
SCREAMING_SNAKE_CASE : List[str] = lm_model(input_ids=lowerCamelCase_ , decoder_input_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int64 )
SCREAMING_SNAKE_CASE : Tuple = shift_tokens_right(lowerCamelCase_ , 1 , 2 )
SCREAMING_SNAKE_CASE : List[str] = np.equal(lowerCamelCase_ , 1 ).astype(np.float32 ).sum()
SCREAMING_SNAKE_CASE : Dict = np.equal(lowerCamelCase_ , 1 ).astype(np.float32 ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(lowerCamelCase_ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
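# For reference, a minimal numpy sketch of the shift-right behavior asserted above
# (a hypothetical standalone version; the test imports the library implementation):
# shifted = np.zeros_like(input_ids)
# shifted[:, 1:] = input_ids[:, :-1]
# shifted[:, 0] = 2  # decoder_start_token_id from the call above
# One trailing pad token (id 1) falls off the end, which is why the test expects
# exactly n_pad_before - 1 pads after shifting.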
@require_flax
class lowercase__( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
'''simple docstring'''
UpperCamelCase = True
UpperCamelCase = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
UpperCamelCase = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def __lowerCAmelCase ( self :Dict ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = FlaxBlenderbotSmallModelTester(self )
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
def __lowerCAmelCase ( self :List[str] ) -> Any:
'''simple docstring'''
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE : Dict = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ )
@jax.jit
def encode_jitted(lowerCamelCase_ :List[str] , lowerCamelCase_ :Union[str, Any]=None , **lowerCamelCase_ :Union[str, Any] ):
return model.encode(input_ids=lowerCamelCase_ , attention_mask=lowerCamelCase_ )
with self.subTest('''JIT Enabled''' ):
SCREAMING_SNAKE_CASE : Optional[Any] = encode_jitted(**lowerCamelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE : Tuple = encode_jitted(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) )
for jitted_output, output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
SCREAMING_SNAKE_CASE : Dict = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCamelCase_ :List[Any] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[Any] ):
return model.decode(
decoder_input_ids=lowerCamelCase_ , decoder_attention_mask=lowerCamelCase_ , encoder_outputs=lowerCamelCase_ , )
with self.subTest('''JIT Enabled''' ):
SCREAMING_SNAKE_CASE : str = decode_jitted(**lowerCamelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE : List[Any] = decode_jitted(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) )
for jitted_output, output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = model_class_name.from_pretrained('''facebook/blenderbot_small-90M''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
SCREAMING_SNAKE_CASE : str = np.ones((1, 1) ) * model.config.eos_token_id
SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
| 18 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[Any] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_funnel"] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_funnel"] = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
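# Effect of the lazy pattern above (an illustrative sketch, not extra API): a plain
# `from transformers.models.funnel import FunnelConfig` stays cheap because the torch
# and TF entries registered in _import_structure are only imported on first attribute
# access through the _LazyModule installed in sys.modules.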
| 18 | 1 |
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class lowercase__( nn.Module ):
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 0.0
UpperCamelCase = 1
UpperCamelCase = 1
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = jnp.float32
def __lowerCAmelCase ( self :Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = []
SCREAMING_SNAKE_CASE : Optional[int] = []
for i in range(self.num_layers ):
SCREAMING_SNAKE_CASE : Optional[int] = self.in_channels if i == 0 else self.out_channels
SCREAMING_SNAKE_CASE : List[Any] = FlaxResnetBlock2D(
in_channels=lowerCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = FlaxTransformer2DModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = resnets
SCREAMING_SNAKE_CASE : List[str] = attentions
if self.add_downsample:
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxDownsample2D(self.out_channels , dtype=self.dtype )
def __call__( self :int , lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Tuple=True ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = ()
for resnet, attn in zip(self.resnets , self.attentions ):
SCREAMING_SNAKE_CASE : int = resnet(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = attn(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.downsamplers_0(lowerCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
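# Shape sketch for the cross-attention down block above (hypothetical sizes): with
# add_downsample=True, an input of (batch, 64, 64, in_channels) leaves as
# (batch, 32, 32, out_channels), and output_states keeps every intermediate activation
# so the matching up block can consume them as skip connections.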
class lowercase__( nn.Module ):
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 0.0
UpperCamelCase = 1
UpperCamelCase = True
UpperCamelCase = jnp.float32
def __lowerCAmelCase ( self :List[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = []
for i in range(self.num_layers ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.in_channels if i == 0 else self.out_channels
SCREAMING_SNAKE_CASE : Optional[int] = FlaxResnetBlock2D(
in_channels=lowerCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = resnets
if self.add_downsample:
SCREAMING_SNAKE_CASE : int = FlaxDownsample2D(self.out_channels , dtype=self.dtype )
def __call__( self :Optional[Any] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :List[str]=True ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = ()
for resnet in self.resnets:
SCREAMING_SNAKE_CASE : int = resnet(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
SCREAMING_SNAKE_CASE : Optional[int] = self.downsamplers_0(lowerCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class lowercase__( nn.Module ):
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 0.0
UpperCamelCase = 1
UpperCamelCase = 1
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = jnp.float32
def __lowerCAmelCase ( self :Any ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = []
SCREAMING_SNAKE_CASE : List[Any] = []
for i in range(self.num_layers ):
SCREAMING_SNAKE_CASE : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
SCREAMING_SNAKE_CASE : Dict = self.prev_output_channel if i == 0 else self.out_channels
SCREAMING_SNAKE_CASE : Tuple = FlaxResnetBlock2D(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = FlaxTransformer2DModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = resnets
SCREAMING_SNAKE_CASE : Tuple = attentions
if self.add_upsample:
SCREAMING_SNAKE_CASE : int = FlaxUpsample2D(self.out_channels , dtype=self.dtype )
def __call__( self :List[Any] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Dict=True ) -> Union[str, Any]:
'''simple docstring'''
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
SCREAMING_SNAKE_CASE : Any = res_hidden_states_tuple[-1]
SCREAMING_SNAKE_CASE : Any = res_hidden_states_tuple[:-1]
SCREAMING_SNAKE_CASE : Dict = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
SCREAMING_SNAKE_CASE : Optional[Any] = resnet(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = attn(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ )
if self.add_upsample:
SCREAMING_SNAKE_CASE : Optional[int] = self.upsamplers_0(lowerCamelCase_ )
return hidden_states
class lowercase__( nn.Module ):
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 0.0
UpperCamelCase = 1
UpperCamelCase = True
UpperCamelCase = jnp.float32
def __lowerCAmelCase ( self :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = []
for i in range(self.num_layers ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
SCREAMING_SNAKE_CASE : List[str] = self.prev_output_channel if i == 0 else self.out_channels
SCREAMING_SNAKE_CASE : List[Any] = FlaxResnetBlock2D(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = resnets
if self.add_upsample:
SCREAMING_SNAKE_CASE : Optional[Any] = FlaxUpsample2D(self.out_channels , dtype=self.dtype )
def __call__( self :List[Any] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] , lowerCamelCase_ :int=True ) -> Dict:
'''simple docstring'''
for resnet in self.resnets:
# pop res hidden states
SCREAMING_SNAKE_CASE : List[str] = res_hidden_states_tuple[-1]
SCREAMING_SNAKE_CASE : Tuple = res_hidden_states_tuple[:-1]
SCREAMING_SNAKE_CASE : Optional[int] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
SCREAMING_SNAKE_CASE : str = resnet(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ )
if self.add_upsample:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.upsamplers_0(lowerCamelCase_ )
return hidden_states
class lowercase__( nn.Module ):
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = 0.0
UpperCamelCase = 1
UpperCamelCase = 1
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = jnp.float32
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = [
FlaxResnetBlock2D(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
SCREAMING_SNAKE_CASE : int = []
for _ in range(self.num_layers ):
SCREAMING_SNAKE_CASE : Tuple = FlaxTransformer2DModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = FlaxResnetBlock2D(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = resnets
SCREAMING_SNAKE_CASE : Any = attentions
def __call__( self :Optional[Any] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Any=True ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.resnets[0](lowerCamelCase_ , lowerCamelCase_ )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
SCREAMING_SNAKE_CASE : str = attn(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = resnet(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ )
return hidden_states
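# Wiring sketch (assumed from the block interfaces above, not a full UNet): a forward
# pass threads hidden_states through the down blocks while accumulating output_states,
# runs this mid block once, then feeds the up blocks, which pop res_hidden_states_tuple
# in reverse order to restore the spatial resolution.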
| 18 |
"""simple docstring"""
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def config( *args : Any , **kwargs : Union[str, Any] )-> Dict:
'''simple docstring'''
return AutoConfig.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer( *args : str , **kwargs : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
return AutoTokenizer.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModel.__doc__ )
def model( *args : List[str] , **kwargs : int )-> Dict:
'''simple docstring'''
return AutoModel.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM( *args : Any , **kwargs : Tuple )-> Dict:
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM( *args : Dict , **kwargs : Optional[Any] )-> Optional[int]:
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification( *args : Optional[int] , **kwargs : str )-> Optional[int]:
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering( *args : List[str] , **kwargs : int )-> List[Any]:
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs )
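# Hub usage sketch (repo path and checkpoint name are illustrative assumptions):
# import torch
# tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
# mdl = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")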
| 18 | 1 |
"""simple docstring"""
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")
METRIC_CONVERSION = {
"cubicmeter": from_to(1, 1),
"litre": from_to(0.001, 1000),
"kilolitre": from_to(1, 1),
"gallon": from_to(0.00454, 264.172),
"cubicyard": from_to(0.76455, 1.30795),
"cubicfoot": from_to(0.028, 35.3147),
"cup": from_to(0.000236588, 4226.75),
}
def volume_conversion( value : float , from_type : str , to_type : str )-> float:
'''simple docstring'''
if from_type not in METRIC_CONVERSION:
raise ValueError(
F"Invalid 'from_type' value: {from_type!r}. Supported values are:\n"
+ ''', '''.join(METRIC_CONVERSION ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
+ ''', '''.join(METRIC_CONVERSION ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
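# Worked example for the formula above: volume_conversion(4, "cubicmeter", "litre")
# = 4 * METRIC_CONVERSION["cubicmeter"].from_ * METRIC_CONVERSION["litre"].to
# = 4 * 1 * 1000 = 4000.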
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Any = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """encodec"""
def __init__( self :List[str] , lowerCamelCase_ :Tuple=[1.5, 3.0, 6.0, 1_2.0, 2_4.0] , lowerCamelCase_ :str=2_40_00 , lowerCamelCase_ :Any=1 , lowerCamelCase_ :List[Any]=False , lowerCamelCase_ :Optional[int]=None , lowerCamelCase_ :Optional[Any]=None , lowerCamelCase_ :str=1_28 , lowerCamelCase_ :Any=32 , lowerCamelCase_ :int=1 , lowerCamelCase_ :Dict=[8, 5, 4, 2] , lowerCamelCase_ :List[Any]="weight_norm" , lowerCamelCase_ :Optional[int]=7 , lowerCamelCase_ :Tuple=7 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :int=2 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :Optional[int]="reflect" , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Dict=1.0 , lowerCamelCase_ :Any=10_24 , lowerCamelCase_ :str=None , lowerCamelCase_ :Union[str, Any]=True , **lowerCamelCase_ :Optional[int] , ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = target_bandwidths
SCREAMING_SNAKE_CASE : List[str] = sampling_rate
SCREAMING_SNAKE_CASE : Tuple = audio_channels
SCREAMING_SNAKE_CASE : Tuple = normalize
SCREAMING_SNAKE_CASE : str = chunk_length_s
SCREAMING_SNAKE_CASE : List[str] = overlap
SCREAMING_SNAKE_CASE : int = hidden_size
SCREAMING_SNAKE_CASE : Optional[int] = num_filters
SCREAMING_SNAKE_CASE : Tuple = num_residual_layers
SCREAMING_SNAKE_CASE : List[Any] = upsampling_ratios
SCREAMING_SNAKE_CASE : Optional[int] = norm_type
SCREAMING_SNAKE_CASE : Any = kernel_size
SCREAMING_SNAKE_CASE : Union[str, Any] = last_kernel_size
SCREAMING_SNAKE_CASE : Tuple = residual_kernel_size
SCREAMING_SNAKE_CASE : Any = dilation_growth_rate
SCREAMING_SNAKE_CASE : Optional[int] = use_causal_conv
SCREAMING_SNAKE_CASE : str = pad_mode
SCREAMING_SNAKE_CASE : List[Any] = compress
SCREAMING_SNAKE_CASE : Optional[Any] = num_lstm_layers
SCREAMING_SNAKE_CASE : Dict = trim_right_ratio
SCREAMING_SNAKE_CASE : List[Any] = codebook_size
SCREAMING_SNAKE_CASE : Union[str, Any] = codebook_dim if codebook_dim is not None else hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}" )
super().__init__(**lowerCamelCase_ )
@property
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __lowerCAmelCase ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
hop_length = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
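# Concrete numbers for the 24 kHz defaults above: hop_length = 8 * 5 * 4 * 2 = 320
# samples, so frame_rate = ceil(24000 / 320) = 75 frames per second, and the property
# above yields int(1000 * 24.0 // (75 * 10)) = 32 quantizers at the top bandwidth.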
| 18 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
@property
def __lowerCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , )
return model
@property
def __lowerCAmelCase ( self :Tuple ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(lowerCamelCase_ )
def __lowerCAmelCase ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_uncond_unet
SCREAMING_SNAKE_CASE : int = DDIMScheduler()
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_vq_model
SCREAMING_SNAKE_CASE : Dict = LDMPipeline(unet=lowerCamelCase_ , vqvae=lowerCamelCase_ , scheduler=lowerCamelCase_ )
ldm.to(lowerCamelCase_ )
ldm.set_progress_bar_config(disable=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = ldm(generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = ldm(generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''numpy''' , return_dict=lowerCamelCase_ )[0]
SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
SCREAMING_SNAKE_CASE : Dict = 1E-2 if torch_device != '''mps''' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class lowercase__( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = LDMPipeline.from_pretrained('''CompVis/ldm-celebahq-256''' )
ldm.to(lowerCamelCase_ )
ldm.set_progress_bar_config(disable=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = ldm(generator=lowerCamelCase_ , num_inference_steps=5 , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
SCREAMING_SNAKE_CASE : str = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
SCREAMING_SNAKE_CASE : str = 1E-2 if torch_device != '''mps''' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
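# End-to-end sketch mirroring the slow test above (step count and file name are
# illustrative, and the checkpoint must be downloadable):
# pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
# image = pipe(num_inference_steps=50, output_type="pil").images[0]
# image.save("celebahq_sample.png")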
| 18 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMv3Config,
LayoutLMv3ForQuestionAnswering,
LayoutLMv3ForSequenceClassification,
LayoutLMv3ForTokenClassification,
LayoutLMv3Model,
)
from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMv3ImageProcessor
class lowercase__:
'''simple docstring'''
def __init__( self :Tuple , lowerCamelCase_ :Tuple , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :Union[str, Any]=4 , lowerCamelCase_ :List[str]=2 , lowerCamelCase_ :str=7 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Dict=True , lowerCamelCase_ :int=True , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Optional[Any]=99 , lowerCamelCase_ :Any=36 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :str=4 , lowerCamelCase_ :Tuple=37 , lowerCamelCase_ :Optional[int]="gelu" , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :Tuple=5_12 , lowerCamelCase_ :Optional[Any]=16 , lowerCamelCase_ :List[str]=2 , lowerCamelCase_ :Optional[int]=0.0_2 , lowerCamelCase_ :int=6 , lowerCamelCase_ :str=6 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :Union[str, Any]=4 , lowerCamelCase_ :List[Any]=None , lowerCamelCase_ :Tuple=10_00 , ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : List[str] = num_channels
SCREAMING_SNAKE_CASE : str = image_size
SCREAMING_SNAKE_CASE : Optional[int] = patch_size
SCREAMING_SNAKE_CASE : Tuple = text_seq_length
SCREAMING_SNAKE_CASE : Optional[int] = is_training
SCREAMING_SNAKE_CASE : Dict = use_input_mask
SCREAMING_SNAKE_CASE : Any = use_token_type_ids
SCREAMING_SNAKE_CASE : List[Any] = use_labels
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : int = type_vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Dict = coordinate_size
SCREAMING_SNAKE_CASE : List[Any] = shape_size
SCREAMING_SNAKE_CASE : Dict = num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = num_choices
SCREAMING_SNAKE_CASE : List[str] = scope
SCREAMING_SNAKE_CASE : Optional[int] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
SCREAMING_SNAKE_CASE : str = text_seq_length
SCREAMING_SNAKE_CASE : int = (image_size // patch_size) ** 2 + 1
SCREAMING_SNAKE_CASE : Optional[Any] = self.text_seq_length + self.image_seq_length
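# Concrete numbers for the defaults above (image_size=4, patch_size=2,
# text_seq_length=7): image_seq_length = (4 // 2) ** 2 + 1 = 5 patch tokens including
# CLS, so the combined seq_length checked by the shape assertions is 7 + 5 = 12.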
def __lowerCAmelCase ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE : str = bbox[i, j, 3]
SCREAMING_SNAKE_CASE : List[str] = bbox[i, j, 1]
SCREAMING_SNAKE_CASE : Any = t
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE : Any = bbox[i, j, 2]
SCREAMING_SNAKE_CASE : Any = bbox[i, j, 0]
SCREAMING_SNAKE_CASE : Optional[Any] = t
SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.text_seq_length] )
SCREAMING_SNAKE_CASE : Any = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : List[str] = LayoutLMv3Config(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = LayoutLMv3Model(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# text + image
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ , pixel_values=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
SCREAMING_SNAKE_CASE : List[str] = model(pixel_values=lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __lowerCAmelCase ( self :str , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :List[str] , lowerCamelCase_ :Any , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE : Dict = LayoutLMv3ForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.num_labels
SCREAMING_SNAKE_CASE : int = LayoutLMv3ForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :Dict , lowerCamelCase_ :Dict , lowerCamelCase_ :Dict , lowerCamelCase_ :int , lowerCamelCase_ :List[str] , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = LayoutLMv3ForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
bbox,
pixel_values,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
) = config_and_inputs
inputs_dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowercase__( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = (
(
LayoutLMv3Model,
LayoutLMv3ForSequenceClassification,
LayoutLMv3ForTokenClassification,
LayoutLMv3ForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
if is_torch_available()
else {}
)
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :str , lowerCamelCase_ :str , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
return True
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = LayoutLMv3ModelTester(self )
SCREAMING_SNAKE_CASE : List[Any] = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :List[str] , lowerCamelCase_ :str=False ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = copy.deepcopy(lowerCamelCase_ )
if model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Tuple = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(lowerCamelCase_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
elif model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
elif model_class in [
*get_values(lowerCamelCase_ ),
]:
SCREAMING_SNAKE_CASE : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
elif model_class in [
*get_values(lowerCamelCase_ ),
]:
SCREAMING_SNAKE_CASE : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowerCamelCase_ , )
return inputs_dict
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE : str = type
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Union[str, Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[int] = LayoutLMv3Model.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def prepare_img( )-> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self :str ) -> int:
'''simple docstring'''
return LayoutLMv3ImageProcessor(apply_ocr=lowerCamelCase_ ) if is_vision_available() else None
@slow
def __lowerCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = LayoutLMv3Model.from_pretrained('''microsoft/layoutlmv3-base''' ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.default_image_processor
SCREAMING_SNAKE_CASE : List[Any] = prepare_img()
SCREAMING_SNAKE_CASE : Tuple = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).pixel_values.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[1, 2]] )
SCREAMING_SNAKE_CASE : Dict = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
SCREAMING_SNAKE_CASE : Tuple = model(
input_ids=input_ids.to(lowerCamelCase_ ) , bbox=bbox.to(lowerCamelCase_ ) , pixel_values=pixel_values.to(lowerCamelCase_ ) , )
# verify the logits
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 1_99, 7_68) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase_ , atol=1E-4 ) )
| 18 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = (
"""This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."""
"""It takes two arguments named `image` which should be the original image, and `label` which should be a text """
"""describing the elements what should be identified in the segmentation mask. The tool returns the mask."""
)
UpperCamelCase = """CIDAS/clipseg-rd64-refined"""
UpperCamelCase = """image_segmenter"""
UpperCamelCase = CLIPSegForImageSegmentation
UpperCamelCase = ["""image""", """text"""]
UpperCamelCase = ["""image"""]
def __init__( self :Dict , *lowerCamelCase_ :int , **lowerCamelCase_ :Any ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''vision'''] )
super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :"Image" , lowerCamelCase_ :str ) -> Union[str, Any]:
'''simple docstring'''
return self.pre_processor(text=[label] , images=[image] , padding=lowerCamelCase_ , return_tensors='''pt''' )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :Dict ) -> Tuple:
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = self.model(**lowerCamelCase_ ).logits
return logits
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :Optional[Any] ) -> List[str]:
'''simple docstring'''
array = outputs.cpu().detach().numpy()
array[array <= 0] = 0
array[array > 0] = 1
return Image.fromarray((array * 255).astype(np.uint8 ) )
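# Invocation sketch (argument names follow the tool's declared inputs; setup is
# handled lazily by PipelineTool):
# tool = lowercase__()
# mask = tool(image=my_pil_image, label="cat")
# `mask` is a binary PIL image: 255 where CLIPSeg scores the label positive, 0 elsewhere.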
| 18 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
lowerCamelCase__ : Any = logging.getLogger(__name__)
@dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = field(
default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
UpperCamelCase = field(
default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , )
UpperCamelCase = field(
default=10_24 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of prediction examples to this """
"""value if set."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """A csv or a json file containing the training data."""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """A csv or a json file containing the validation data."""} )
UpperCamelCase = field(default=_UpperCAmelCase , metadata={"""help""": """A csv or a json file containing the test data."""} )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
train_extension = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
validation_extension = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
UpperCamelCase = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def __A ( )-> List[Any]:
'''simple docstring'''
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
SCREAMING_SNAKE_CASE : Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(a_ )
datasets.utils.logging.set_verbosity(a_ )
transformers.utils.logging.set_verbosity(a_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE : Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
SCREAMING_SNAKE_CASE : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
SCREAMING_SNAKE_CASE : Any = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
SCREAMING_SNAKE_CASE : List[Any] = data_args.train_file.split('''.''' )[-1]
SCREAMING_SNAKE_CASE : Optional[int] = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
SCREAMING_SNAKE_CASE : str = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(F"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
SCREAMING_SNAKE_CASE : int = load_dataset('''csv''' , data_files=a_ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
SCREAMING_SNAKE_CASE : Tuple = load_dataset('''json''' , data_files=a_ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
SCREAMING_SNAKE_CASE : str = raw_datasets['''train'''].features['''label'''].names
SCREAMING_SNAKE_CASE : Union[str, Any] = len(a_ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=a_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
SCREAMING_SNAKE_CASE : Dict = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=a_ , )
SCREAMING_SNAKE_CASE : List[Any] = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=a_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
SCREAMING_SNAKE_CASE : Tuple = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
SCREAMING_SNAKE_CASE : Optional[Any] = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
SCREAMING_SNAKE_CASE : Tuple = {'''Refused''': 0, '''Entailed''': 1}
SCREAMING_SNAKE_CASE : List[Any] = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
SCREAMING_SNAKE_CASE : Optional[int] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(examples : str ):
# Tokenize the texts
def _convert_table_text_to_pandas(_table_text : List[Any] ):
SCREAMING_SNAKE_CASE : List[Any] = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
SCREAMING_SNAKE_CASE : Dict = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
SCREAMING_SNAKE_CASE : List[Any] = examples['''statement''']
SCREAMING_SNAKE_CASE : Optional[int] = list(map(_convert_table_text_to_pandas , examples['''table_text'''] ) )
SCREAMING_SNAKE_CASE : Any = tokenizer(tables , questions , padding=padding , max_length=max_seq_length , truncation=True )
SCREAMING_SNAKE_CASE : List[Any] = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
SCREAMING_SNAKE_CASE : Optional[Any] = raw_datasets.map(
a_ , batched=a_ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on dataset''' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
SCREAMING_SNAKE_CASE : List[str] = raw_datasets['''train''']
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE : Any = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
SCREAMING_SNAKE_CASE : List[str] = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
SCREAMING_SNAKE_CASE : Tuple = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
SCREAMING_SNAKE_CASE : Optional[int] = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(a_ ) ) , 3 ):
logger.info(F"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary mapping strings to floats.
def compute_metrics(p : EvalPrediction ):
SCREAMING_SNAKE_CASE : str = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
SCREAMING_SNAKE_CASE : Tuple = np.argmax(preds , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
SCREAMING_SNAKE_CASE : Tuple = default_data_collator
elif training_args.fpaa:
SCREAMING_SNAKE_CASE : Union[str, Any] = DataCollatorWithPadding(a_ , pad_to_multiple_of=8 )
else:
SCREAMING_SNAKE_CASE : List[Any] = None
# Initialize our Trainer
SCREAMING_SNAKE_CASE : Optional[Any] = Trainer(
model=a_ , args=a_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=a_ , tokenizer=a_ , data_collator=a_ , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE : List[str] = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE : str = last_checkpoint
SCREAMING_SNAKE_CASE : str = trainer.train(resume_from_checkpoint=a_ )
SCREAMING_SNAKE_CASE : Optional[int] = train_result.metrics
SCREAMING_SNAKE_CASE : int = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(a_ )
)
SCREAMING_SNAKE_CASE : Optional[int] = min(a_ , len(a_ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , a_ )
trainer.save_metrics('''train''' , a_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
SCREAMING_SNAKE_CASE : Tuple = trainer.evaluate(eval_dataset=a_ )
SCREAMING_SNAKE_CASE : str = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = min(a_ , len(a_ ) )
trainer.log_metrics('''eval''' , a_ )
trainer.save_metrics('''eval''' , a_ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` column because it contains -1 and Trainer won't like that.
SCREAMING_SNAKE_CASE : Optional[Any] = predict_dataset.remove_columns('''label''' )
SCREAMING_SNAKE_CASE : Optional[Any] = trainer.predict(a_ , metric_key_prefix='''predict''' ).predictions
SCREAMING_SNAKE_CASE : Union[str, Any] = np.argmax(a_ , axis=1 )
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(a_ , '''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(a_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = label_list[item]
writer.write(F"{index}\t{item}\n" )
SCREAMING_SNAKE_CASE : Optional[int] = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**a_ )
else:
trainer.create_model_card(**a_ )
def __A ( a_ : List[str] )-> int:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 18 | 1 |
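A standalone sketch of the table preprocessing used in the script above; the '#' cell delimiter and newline row delimiter come from the script, while the column names and cell values here are invented for illustration:

import pandas as pd

def convert_table_text_to_pandas(table_text: str) -> pd.DataFrame:
    # Rows are newline-separated, cells are '#'-separated; the first row is the header.
    rows = [row.split("#") for row in table_text.strip("\n").split("\n")]
    return pd.DataFrame.from_records(rows[1:], columns=rows[0])

df = convert_table_text_to_pandas("year#city\n2020#Tokyo\n2024#Paris")
assert list(df.columns) == ["year", "city"] and len(df) == 2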
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : str = logging.get_logger(__name__)
lowerCamelCase__ : int = {
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """lilt"""
def __init__( self :List[Any] , lowerCamelCase_ :Dict=3_05_22 , lowerCamelCase_ :Any=7_68 , lowerCamelCase_ :int=12 , lowerCamelCase_ :Dict=12 , lowerCamelCase_ :Tuple=30_72 , lowerCamelCase_ :Union[str, Any]="gelu" , lowerCamelCase_ :Optional[Any]=0.1 , lowerCamelCase_ :int=0.1 , lowerCamelCase_ :Optional[int]=5_12 , lowerCamelCase_ :Optional[Any]=2 , lowerCamelCase_ :List[str]=0.0_2 , lowerCamelCase_ :Dict=1E-12 , lowerCamelCase_ :Optional[int]=0 , lowerCamelCase_ :List[str]="absolute" , lowerCamelCase_ :Any=None , lowerCamelCase_ :List[str]=4 , lowerCamelCase_ :Optional[Any]=10_24 , **lowerCamelCase_ :Optional[int] , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : int = max_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE : int = position_embedding_type
SCREAMING_SNAKE_CASE : Optional[int] = classifier_dropout
SCREAMING_SNAKE_CASE : Optional[Any] = channel_shrink_ratio
SCREAMING_SNAKE_CASE : str = max_ad_position_embeddings
| 18 |
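A minimal usage sketch for a configuration class like the one above; the class name and fields here are invented, and only the PretrainedConfig subclassing pattern (a model_type plus super().__init__(pad_token_id=...) forwarding) mirrors the file:

from transformers import PretrainedConfig

class TinyDemoConfig(PretrainedConfig):
    model_type = "tiny-demo"  # hypothetical model type, not a registered one

    def __init__(self, vocab_size=1000, hidden_size=64, pad_token_id=0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size

config = TinyDemoConfig(hidden_size=128)
assert config.hidden_size == 128 and config.pad_token_id == 0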
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase__:
'''simple docstring'''
def __init__( self :str , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Any=13 , lowerCamelCase_ :Any=32 , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :Union[str, Any]=16 , lowerCamelCase_ :int=[1, 2, 1] , lowerCamelCase_ :str=[2, 2, 4] , lowerCamelCase_ :str=2 , lowerCamelCase_ :Tuple=2.0 , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :str=0.0 , lowerCamelCase_ :Optional[int]=0.0 , lowerCamelCase_ :Dict=0.1 , lowerCamelCase_ :Union[str, Any]="gelu" , lowerCamelCase_ :str=False , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :int=0.0_2 , lowerCamelCase_ :List[Any]=1E-5 , lowerCamelCase_ :int=True , lowerCamelCase_ :str=None , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Union[str, Any]=10 , lowerCamelCase_ :List[Any]=8 , ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : Any = embed_dim
SCREAMING_SNAKE_CASE : int = depths
SCREAMING_SNAKE_CASE : List[str] = num_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = window_size
SCREAMING_SNAKE_CASE : Optional[Any] = mlp_ratio
SCREAMING_SNAKE_CASE : List[Any] = qkv_bias
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = drop_path_rate
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = use_absolute_embeddings
SCREAMING_SNAKE_CASE : Any = patch_norm
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Any = is_training
SCREAMING_SNAKE_CASE : List[Any] = scope
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : Optional[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_stride
def __lowerCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self :int ) -> int:
'''simple docstring'''
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = SwinvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
SCREAMING_SNAKE_CASE : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
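# Worked example with the tester defaults above (image_size=32, patch_size=2,
# embed_dim=16, depths=[1, 2, 1]): the (32 // 2) ** 2 = 256 initial patches are
# merged 2x2 at each of the len(depths) - 1 = 2 stage transitions, leaving
# 256 // 4 ** 2 = 16 tokens of width 16 * 2 ** 2 = 64 in the last hidden state.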
def __lowerCAmelCase ( self :str , lowerCamelCase_ :str , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = SwinvaForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE : Tuple = 1
SCREAMING_SNAKE_CASE : List[Any] = SwinvaForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :int ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = SwinvaForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
UpperCamelCase = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __lowerCAmelCase ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = SwinvaModelTester(self )
SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=lowerCamelCase_ , embed_dim=37 )
def __lowerCAmelCase ( self :Dict ) -> List[str]:
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def __lowerCAmelCase ( self :List[Any] ) -> Any:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[Any] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , nn.Linear ) )
def __lowerCAmelCase ( self :int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Optional[int] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.attentions
SCREAMING_SNAKE_CASE : Tuple = len(self.model_tester.depths )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Optional[int] = config.window_size**2
SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Dict = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
SCREAMING_SNAKE_CASE : Dict = len(lowerCamelCase_ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : int = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
if hasattr(self.model_tester , '''num_hidden_states_types''' ):
SCREAMING_SNAKE_CASE : Any = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
SCREAMING_SNAKE_CASE : Optional[Any] = 2
self.assertEqual(out_len + added_hidden_states , len(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :int , lowerCamelCase_ :Any , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE : str = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
# Swinv2 has a different seq_length
SCREAMING_SNAKE_CASE : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
SCREAMING_SNAKE_CASE : Any = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = reshaped_hidden_states[0].shape
SCREAMING_SNAKE_CASE : Optional[int] = (
reshaped_hidden_states[0].view(lowerCamelCase_ , lowerCamelCase_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : List[str] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[Any] = 3
SCREAMING_SNAKE_CASE : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
SCREAMING_SNAKE_CASE : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
SCREAMING_SNAKE_CASE : Optional[int] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
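# e.g. an image side of 32 with patch size 2 (the tester defaults) pads to
# 32 + 2 - (32 % 2) = 34; note the formula always adds at least one extra patch,
# even when the side is already divisible by the patch size.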
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , (padded_height, padded_width) )
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Tuple = SwinvaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self :Dict ) -> List[Any]:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE : List[str] = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**lowerCamelCase_ )
# verify the logits
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) )
| 18 | 1 |
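A minimal sketch of the reshaped-hidden-states check from the tests above, with a random tensor standing in for a real model output (all sizes are illustrative):

import torch

batch, channels, height, width = 2, 96, 8, 8
reshaped = torch.randn(batch, channels, height, width)
# (batch, C, H, W) -> (batch, H * W, C): one channel vector per spatial position
tokens = reshaped.view(batch, channels, height * width).permute(0, 2, 1)
assert tokens.shape == (batch, height * width, channels)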
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowercase__:
'''simple docstring'''
def __init__( self :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Tuple=13 , lowerCamelCase_ :List[str]=30 , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Optional[int]=3 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :Optional[int]=True , lowerCamelCase_ :Tuple=32 , lowerCamelCase_ :List[Any]=5 , lowerCamelCase_ :Any=4 , lowerCamelCase_ :Tuple=37 , lowerCamelCase_ :List[str]="gelu" , lowerCamelCase_ :Dict=0.1 , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Optional[Any]=10 , lowerCamelCase_ :Union[str, Any]=0.0_2 , lowerCamelCase_ :Tuple=3 , lowerCamelCase_ :str=None , lowerCamelCase_ :Any=2 , ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : List[str] = image_size
SCREAMING_SNAKE_CASE : int = patch_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE : Tuple = is_training
SCREAMING_SNAKE_CASE : str = use_labels
SCREAMING_SNAKE_CASE : List[str] = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = type_sequence_label_size
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : List[Any] = scope
SCREAMING_SNAKE_CASE : Any = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
SCREAMING_SNAKE_CASE : int = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE : Optional[Any] = num_patches + 2
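# Worked example with the defaults above: image_size=30 and patch_size=2 give
# (30 // 2) ** 2 = 225 patches, so seq_length = 225 + 2 = 227 once the [CLS]
# and distillation tokens are prepended.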
def __lowerCAmelCase ( self :str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :int ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = DeiTModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = DeiTForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE : str = 1
SCREAMING_SNAKE_CASE : List[Any] = DeiTForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :str ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.type_sequence_label_size
SCREAMING_SNAKE_CASE : Union[str, Any] = DeiTForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE : List[Any] = 1
SCREAMING_SNAKE_CASE : Tuple = DeiTForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self :List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE : Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
"""feature-extraction""": DeiTModel,
"""image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __lowerCAmelCase ( self :List[str] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = DeiTModelTester(self )
SCREAMING_SNAKE_CASE : Union[str, Any] = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 )
def __lowerCAmelCase ( self :Dict ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def __lowerCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , nn.Linear ) )
def __lowerCAmelCase ( self :Tuple ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : List[Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def __lowerCAmelCase ( self :Any ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :List[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
def __lowerCAmelCase ( self :int , lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Optional[int]=False ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = super()._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __lowerCAmelCase ( self :List[Any] ) -> Dict:
'''simple docstring'''
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : str = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowerCamelCase_ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
SCREAMING_SNAKE_CASE : int = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.train()
SCREAMING_SNAKE_CASE : Tuple = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = model(**lowerCamelCase_ ).loss
loss.backward()
def __lowerCAmelCase ( self :Optional[int] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : Optional[Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCamelCase_ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
SCREAMING_SNAKE_CASE : Dict = model_class(lowerCamelCase_ )
model.gradient_checkpointing_enable()
model.to(lowerCamelCase_ )
model.train()
SCREAMING_SNAKE_CASE : Optional[int] = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = model(**lowerCamelCase_ ).loss
loss.backward()
def __lowerCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowerCamelCase_ ),
*get_values(lowerCamelCase_ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}" ):
SCREAMING_SNAKE_CASE : str = problem_type['''title''']
SCREAMING_SNAKE_CASE : Dict = problem_type['''num_labels''']
SCREAMING_SNAKE_CASE : List[Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.train()
SCREAMING_SNAKE_CASE : str = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
if problem_type["num_labels"] > 1:
SCREAMING_SNAKE_CASE : Any = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
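# e.g. toy labels torch.tensor([1, 0]) become [[1], [0]] after unsqueeze(1 )
# and [[1, 1], [0, 0]] after repeat(1 , 2 ), i.e. one target per label column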
SCREAMING_SNAKE_CASE : Union[str, Any] = inputs['''labels'''].to(problem_type['''dtype'''] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowerCamelCase_ ) as warning_list:
SCREAMING_SNAKE_CASE : List[str] = model(**lowerCamelCase_ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f"Something is going wrong in the regression problem: intercepted {w.message}" )
loss.backward()
@slow
def __lowerCAmelCase ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : List[str] = DeiTModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __A ( )-> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self :int ) -> Optional[int]:
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self :int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = DeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ).to(
lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE : List[str] = prepare_img()
SCREAMING_SNAKE_CASE : str = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**lowerCamelCase_ )
# verify the logits
SCREAMING_SNAKE_CASE : str = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __lowerCAmelCase ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = DeiTModel.from_pretrained(
'''facebook/deit-base-distilled-patch16-224''' , torch_dtype=torch.floataa , device_map='''auto''' )
SCREAMING_SNAKE_CASE : Optional[int] = self.default_image_processor
SCREAMING_SNAKE_CASE : List[str] = prepare_img()
SCREAMING_SNAKE_CASE : List[Any] = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Tuple = inputs.pixel_values.to(lowerCamelCase_ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ )
| 18 |
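A hedged sketch of the fp16 inference pattern exercised by the last test above; it assumes a CUDA device and the accelerate package, and the random pixel tensor stands in for a real preprocessed image:

import torch
from transformers import AutoModel

model = AutoModel.from_pretrained(
    "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
)
pixel_values = torch.randn(1, 3, 224, 224, dtype=torch.float16, device=model.device)
with torch.no_grad():
    outputs = model(pixel_values)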
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """blenderbot-small"""
UpperCamelCase = ["""past_key_values"""]
UpperCamelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self :Any , lowerCamelCase_ :Dict=5_02_65 , lowerCamelCase_ :str=5_12 , lowerCamelCase_ :Tuple=8 , lowerCamelCase_ :int=20_48 , lowerCamelCase_ :str=16 , lowerCamelCase_ :Optional[int]=8 , lowerCamelCase_ :str=20_48 , lowerCamelCase_ :Optional[Any]=16 , lowerCamelCase_ :Union[str, Any]=0.0 , lowerCamelCase_ :List[str]=0.0 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :int="gelu" , lowerCamelCase_ :Tuple=5_12 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :int=0.0 , lowerCamelCase_ :Tuple=0.0 , lowerCamelCase_ :Optional[int]=0.0_2 , lowerCamelCase_ :Union[str, Any]=1 , lowerCamelCase_ :Dict=False , lowerCamelCase_ :Optional[int]=0 , lowerCamelCase_ :List[Any]=1 , lowerCamelCase_ :Any=2 , lowerCamelCase_ :Optional[Any]=2 , **lowerCamelCase_ :Dict , ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[Any] = d_model
SCREAMING_SNAKE_CASE : Dict = encoder_ffn_dim
SCREAMING_SNAKE_CASE : Tuple = encoder_layers
SCREAMING_SNAKE_CASE : Dict = encoder_attention_heads
SCREAMING_SNAKE_CASE : Any = decoder_ffn_dim
SCREAMING_SNAKE_CASE : str = decoder_layers
SCREAMING_SNAKE_CASE : str = decoder_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = dropout
SCREAMING_SNAKE_CASE : Optional[Any] = attention_dropout
SCREAMING_SNAKE_CASE : Any = activation_dropout
SCREAMING_SNAKE_CASE : List[str] = activation_function
SCREAMING_SNAKE_CASE : Optional[int] = init_std
SCREAMING_SNAKE_CASE : List[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_layerdrop
SCREAMING_SNAKE_CASE : List[Any] = use_cache
SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_layers
SCREAMING_SNAKE_CASE : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , forced_eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self :Any ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch'''}
SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase_ , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self.num_layers
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : int = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE : List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE : Any = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def __lowerCAmelCase ( self :List[str] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Any = super().outputs
else:
SCREAMING_SNAKE_CASE : Tuple = super(lowerCamelCase_ , self ).outputs
if self.use_past:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_layers
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE : str = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def __lowerCAmelCase ( self :int , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Generate decoder inputs
SCREAMING_SNAKE_CASE : Optional[int] = seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE : str = dict(**lowerCamelCase_ , **lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = common_inputs['''input_ids'''].shape
SCREAMING_SNAKE_CASE : str = common_inputs['''decoder_input_ids'''].shape[1]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.num_attention_heads
SCREAMING_SNAKE_CASE : str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Optional[Any] = decoder_seq_length + 3
SCREAMING_SNAKE_CASE : int = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
SCREAMING_SNAKE_CASE : List[Any] = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(lowerCamelCase_ , lowerCamelCase_ )] , dim=1 )
SCREAMING_SNAKE_CASE : Optional[int] = []
# If both the encoder and decoder layer counts are present in the model configuration, both are considered
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.num_layers
SCREAMING_SNAKE_CASE : int = min(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = max(lowerCamelCase_ , lowerCamelCase_ ) - min_num_layers
SCREAMING_SNAKE_CASE : Tuple = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(lowerCamelCase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE : int = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(lowerCamelCase_ , lowerCamelCase_ ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) )
return common_inputs
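# Sketch of the structure built above: for the first min(num_encoder_layers,
# num_decoder_layers) layers a 4-tuple of zero tensors is appended (decoder
# self-attention and cross-attention key/value states), then a plain
# (key, value) 2-tuple per remaining layer of the deeper stack; each tensor has
# shape (batch, num_attention_heads, sequence_length, hidden_size // num_attention_heads).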
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE : List[str] = seqlen + 2
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = self.num_layers
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self.num_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
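                # Extend the attention mask with ones so it also covers the past_key_values positions.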
SCREAMING_SNAKE_CASE : Tuple = common_inputs['''attention_mask'''].dtype
SCREAMING_SNAKE_CASE : Any = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(lowerCamelCase_ , lowerCamelCase_ , dtype=lowerCamelCase_ )] , dim=1 )
SCREAMING_SNAKE_CASE : Optional[int] = [
(torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) for _ in range(lowerCamelCase_ )
]
return common_inputs
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : int = tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Tuple = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE : Any = dict(tokenizer(lowerCamelCase_ , return_tensors=lowerCamelCase_ ) )
return common_inputs
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
elif self.task == "causal-lm":
SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
return common_inputs
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :int , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Dict ) -> List[Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Optional[Any] = super()._flatten_past_key_values_(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Tuple = super(lowerCamelCase_ , self )._flatten_past_key_values_(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
| 18 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase__:
'''simple docstring'''
def __init__( self :Tuple , lowerCamelCase_ :Dict , lowerCamelCase_ :List[str]=13 , lowerCamelCase_ :Tuple=30 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Dict=3 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Optional[int]=32 , lowerCamelCase_ :List[Any]=5 , lowerCamelCase_ :List[str]=4 , lowerCamelCase_ :Dict=37 , lowerCamelCase_ :int="gelu" , lowerCamelCase_ :List[Any]=0.1 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :str=10 , lowerCamelCase_ :List[Any]=0.0_2 , lowerCamelCase_ :Tuple=None , lowerCamelCase_ :Optional[Any]=2 , ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : Optional[int] = image_size
SCREAMING_SNAKE_CASE : Optional[int] = patch_size
SCREAMING_SNAKE_CASE : str = num_channels
SCREAMING_SNAKE_CASE : List[str] = is_training
SCREAMING_SNAKE_CASE : int = use_labels
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE : Dict = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = scope
SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE : Union[str, Any] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE : List[str] = num_patches + 1
def __lowerCAmelCase ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : str = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self :int ) -> List[str]:
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :str ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = ViTModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :str , lowerCamelCase_ :str , lowerCamelCase_ :Tuple ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = ViTForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE : int = 1
SCREAMING_SNAKE_CASE : Union[str, Any] = ViTForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Dict , lowerCamelCase_ :Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.type_sequence_label_size
SCREAMING_SNAKE_CASE : Tuple = ViTForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE : Union[str, Any] = 1
SCREAMING_SNAKE_CASE : Union[str, Any] = ViTForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self :Tuple ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = config_and_inputs
SCREAMING_SNAKE_CASE : Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __lowerCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = ViTModelTester(self )
SCREAMING_SNAKE_CASE : Any = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 )
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , nn.Linear ) )
def __lowerCAmelCase ( self :Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Tuple = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Optional[int] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :Optional[Any] ) -> List[Any]:
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Any = ViTModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __A ( )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def __lowerCAmelCase ( self :int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = ViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.default_image_processor
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : int = model(**lowerCamelCase_ )
# verify the logits
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = torch.tensor([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) )
@slow
def __lowerCAmelCase ( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = ViTModel.from_pretrained('''facebook/dino-vits8''' ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = ViTImageProcessor.from_pretrained('''facebook/dino-vits8''' , size=4_80 )
SCREAMING_SNAKE_CASE : int = prepare_img()
SCREAMING_SNAKE_CASE : int = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : int = inputs.pixel_values.to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ , interpolate_pos_encoding=lowerCamelCase_ )
# verify the logits
SCREAMING_SNAKE_CASE : str = torch.Size((1, 36_01, 3_84) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[[4.2_3_4_0, 4.3_9_0_6, -6.6_6_9_2], [4.5_4_6_3, 1.8_9_2_8, -6.7_2_5_7], [4.4_4_2_9, 0.8_4_9_6, -5.8_5_8_5]] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase_ , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __lowerCAmelCase ( self :Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ViTModel.from_pretrained('''facebook/dino-vits8''' , torch_dtype=torch.floataa , device_map='''auto''' )
SCREAMING_SNAKE_CASE : str = self.default_image_processor
SCREAMING_SNAKE_CASE : Optional[int] = prepare_img()
SCREAMING_SNAKE_CASE : int = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Any = inputs.pixel_values.to(lowerCamelCase_ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ )
| 18 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCamelCase__ : Dict = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """layoutlmv3"""
def __init__( self :str , lowerCamelCase_ :Optional[Any]=5_02_65 , lowerCamelCase_ :Dict=7_68 , lowerCamelCase_ :Union[str, Any]=12 , lowerCamelCase_ :Optional[Any]=12 , lowerCamelCase_ :Union[str, Any]=30_72 , lowerCamelCase_ :Any="gelu" , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Any=5_12 , lowerCamelCase_ :int=2 , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :Optional[int]=1E-5 , lowerCamelCase_ :Dict=1 , lowerCamelCase_ :int=0 , lowerCamelCase_ :Tuple=2 , lowerCamelCase_ :List[str]=10_24 , lowerCamelCase_ :Tuple=1_28 , lowerCamelCase_ :Any=1_28 , lowerCamelCase_ :Optional[Any]=True , lowerCamelCase_ :str=32 , lowerCamelCase_ :int=1_28 , lowerCamelCase_ :int=64 , lowerCamelCase_ :List[Any]=2_56 , lowerCamelCase_ :Any=True , lowerCamelCase_ :str=True , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :List[str]=2_24 , lowerCamelCase_ :Dict=3 , lowerCamelCase_ :Union[str, Any]=16 , lowerCamelCase_ :Any=None , **lowerCamelCase_ :Optional[Any] , ) -> int:
'''simple docstring'''
super().__init__(
vocab_size=lowerCamelCase_ , hidden_size=lowerCamelCase_ , num_hidden_layers=lowerCamelCase_ , num_attention_heads=lowerCamelCase_ , intermediate_size=lowerCamelCase_ , hidden_act=lowerCamelCase_ , hidden_dropout_prob=lowerCamelCase_ , attention_probs_dropout_prob=lowerCamelCase_ , max_position_embeddings=lowerCamelCase_ , type_vocab_size=lowerCamelCase_ , initializer_range=lowerCamelCase_ , layer_norm_eps=lowerCamelCase_ , pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Optional[Any] = max_ad_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = coordinate_size
SCREAMING_SNAKE_CASE : Tuple = shape_size
SCREAMING_SNAKE_CASE : Optional[int] = has_relative_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_pos_bins
SCREAMING_SNAKE_CASE : int = max_rel_pos
SCREAMING_SNAKE_CASE : Any = has_spatial_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_ad_pos_bins
SCREAMING_SNAKE_CASE : Dict = max_rel_ad_pos
SCREAMING_SNAKE_CASE : Optional[int] = text_embed
SCREAMING_SNAKE_CASE : Any = visual_embed
SCREAMING_SNAKE_CASE : Any = input_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : List[str] = patch_size
SCREAMING_SNAKE_CASE : str = classifier_dropout
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = version.parse("""1.12""" )
@property
def __lowerCAmelCase ( self :List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def __lowerCAmelCase ( self :Optional[int] ) -> float:
'''simple docstring'''
return 1E-5
@property
def __lowerCAmelCase ( self :Tuple ) -> int:
'''simple docstring'''
return 12
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :"ProcessorMixin" , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional["TensorType"] = None , lowerCamelCase_ :int = 3 , lowerCamelCase_ :int = 40 , lowerCamelCase_ :int = 40 , ) -> Mapping[str, Any]:
'''simple docstring'''
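        # apply_ocr is overridden here (False in the upstream implementation) because
        # dummy words and bounding boxes are supplied to the processor explicitly below.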
setattr(processor.image_processor , '''apply_ocr''' , lowerCamelCase_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Dict = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Union[str, Any] = processor.tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Union[str, Any] = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
SCREAMING_SNAKE_CASE : int = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_images(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = dict(
processor(
lowerCamelCase_ , text=lowerCamelCase_ , boxes=lowerCamelCase_ , return_tensors=lowerCamelCase_ , ) )
return inputs
| 18 | 1 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCamelCase__ : str = get_tests_dir("fixtures")
lowerCamelCase__ : Optional[Any] = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
lowerCamelCase__ : Optional[Any] = get_tests_dir("fixtures/dummy-config.json")
class lowercase__( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :str ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
def __lowerCAmelCase ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : int = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
SCREAMING_SNAKE_CASE : int = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ).to_dict()
config_dict.pop('''feature_extractor_type''' )
SCREAMING_SNAKE_CASE : str = WavaVecaFeatureExtractor(**lowerCamelCase_ )
# save in new folder
model_config.save_pretrained(lowerCamelCase_ )
config.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
# make sure private variable is not incorrectly saved
SCREAMING_SNAKE_CASE : Optional[int] = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def __lowerCAmelCase ( self :Dict ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase_ , '''bert-base is not a local folder and is not a valid model identifier''' ):
SCREAMING_SNAKE_CASE : Optional[int] = AutoFeatureExtractor.from_pretrained('''bert-base''' )
def __lowerCAmelCase ( self :Any ) -> Any:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase_ , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , revision='''aaaaaa''' )
def __lowerCAmelCase ( self :Any ) -> Dict:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase_ , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
SCREAMING_SNAKE_CASE : Any = AutoFeatureExtractor.from_pretrained('''hf-internal-testing/config-no-model''' )
def __lowerCAmelCase ( self :List[str] ) -> List[str]:
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : List[Any] = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=lowerCamelCase_ )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , trust_remote_code=lowerCamelCase_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
def __lowerCAmelCase ( self :Union[str, Any] ) -> str:
'''simple docstring'''
try:
AutoConfig.register('''custom''' , lowerCamelCase_ )
AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase_ ):
AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE : List[Any] = CustomFeatureExtractor.from_pretrained(lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def __lowerCAmelCase ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = True
try:
AutoConfig.register('''custom''' , lowerCamelCase_ )
AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE : int = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE : int = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=lowerCamelCase_ )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=lowerCamelCase_ )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(not hasattr(lowerCamelCase_ , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 18 |
"""simple docstring"""
import math
def __A ( a_ : list , a_ : int )-> int:
'''simple docstring'''
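    # Jump search over a sorted list: probe in blocks of size floor(sqrt(n)),
    # then scan linearly inside the block that may contain x.
    # Returns the index of x, or -1 if x is absent.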
SCREAMING_SNAKE_CASE : Optional[int] = len(a_ )
SCREAMING_SNAKE_CASE : Optional[Any] = int(math.floor(math.sqrt(a_ ) ) )
SCREAMING_SNAKE_CASE : List[str] = 0
while arr[min(a_ , a_ ) - 1] < x:
SCREAMING_SNAKE_CASE : Optional[Any] = step
step += int(math.floor(math.sqrt(a_ ) ) )
if prev >= n:
return -1
while arr[prev] < x:
SCREAMING_SNAKE_CASE : Any = prev + 1
if prev == min(a_ , a_ ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
lowerCamelCase__ : Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
lowerCamelCase__ : List[str] = [int(item) for item in user_input.split(",")]
lowerCamelCase__ : Dict = int(input("Enter the number to be searched:\n"))
lowerCamelCase__ : Tuple = jump_search(arr, x)
if res == -1:
print("Number not found!")
else:
print(f'''Number {x} is at index {res}''')
| 18 | 1 |
"""simple docstring"""
from typing import Any
class lowercase__:
'''simple docstring'''
def __init__( self :List[str] , lowerCamelCase_ :Any ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = data
SCREAMING_SNAKE_CASE : Dict = None
class lowercase__:
'''simple docstring'''
def __init__( self :Optional[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = None
def __lowerCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.head
while temp is not None:
print(temp.data , end=''' ''' )
SCREAMING_SNAKE_CASE : Dict = temp.next
print()
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :Any ) -> Any:
'''simple docstring'''
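        # Insert a new node carrying the given data at the head of the list.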
SCREAMING_SNAKE_CASE : Optional[int] = Node(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = self.head
SCREAMING_SNAKE_CASE : Optional[Any] = new_node
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[Any] ) -> Any:
'''simple docstring'''
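        # Intended behaviour: swap the data fields of the nodes holding the two
        # values (no relinking); returns early when the values are equal or
        # either value is not found in the list.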
if node_data_a == node_data_a:
return
else:
SCREAMING_SNAKE_CASE : Dict = self.head
while node_a is not None and node_a.data != node_data_a:
SCREAMING_SNAKE_CASE : Optional[Any] = node_a.next
SCREAMING_SNAKE_CASE : Optional[Any] = self.head
while node_a is not None and node_a.data != node_data_a:
SCREAMING_SNAKE_CASE : Optional[Any] = node_a.next
if node_a is None or node_a is None:
return
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = node_a.data, node_a.data
if __name__ == "__main__":
lowerCamelCase__ : Tuple = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("After swapping")
ll.print_list()
| 18 |
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
lowerCamelCase__ : List[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
lowerCamelCase__ : str = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
lowerCamelCase__ : int = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__( datasets.Metric ):
'''simple docstring'''
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :int=None , lowerCamelCase_ :str=1 , lowerCamelCase_ :Union[str, Any]="binary" , lowerCamelCase_ :Dict=None ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = fa_score(
lowerCamelCase_ , lowerCamelCase_ , labels=lowerCamelCase_ , pos_label=lowerCamelCase_ , average=lowerCamelCase_ , sample_weight=lowerCamelCase_ )
return {"f1": float(lowerCamelCase_ ) if score.size == 1 else score}
| 18 | 1 |
"""simple docstring"""
import math
class lowercase__:
'''simple docstring'''
def __init__( self :Union[str, Any] , lowerCamelCase_ :List[str]=0 ) -> List[Any]: # a graph with Node 0,1,...,N-1
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = n
SCREAMING_SNAKE_CASE : List[Any] = [
[math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ )
] # adjacency matrix for weight
SCREAMING_SNAKE_CASE : Any = [
[math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ )
] # dp[i][j] stores minimum distance from i to j
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = w
def __lowerCAmelCase ( self :str ) -> Union[str, Any]:
'''simple docstring'''
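        # Floyd-Warshall relaxation: for every intermediate node k, keep the
        # cheaper of the direct i->j distance and the i->k->j detour. O(n^3) time.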
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
SCREAMING_SNAKE_CASE : List[str] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return self.dp[u][v]
if __name__ == "__main__":
lowerCamelCase__ : Dict = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 18 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def __A ( a_ : int , a_ : int )-> bool:
'''simple docstring'''
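    # A two-digit fraction num/den is "digit cancelling" when striking the shared
    # digit (units of num == tens of den) leaves an equal fraction, e.g. 49/98 == 4/8.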
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def __A ( a_ : int )-> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : List[str] = 11
SCREAMING_SNAKE_CASE : Union[str, Any] = int('''1''' + '''0''' * digit_len )
for num in range(a_ , a_ ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(a_ , a_ ):
solutions.append(F"{num}/{den}" )
den += 1
num += 1
SCREAMING_SNAKE_CASE : Optional[Any] = 10
return solutions
def __A ( a_ : int = 2 )-> int:
'''simple docstring'''
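    # The digit-cancelling fractions multiply to 1/q in lowest terms, so
    # accumulating den/num yields q directly (the classic Project Euler 33
    # answer for the default two-digit case).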
SCREAMING_SNAKE_CASE : Optional[Any] = 1.0
for fraction in fraction_list(a_ ):
SCREAMING_SNAKE_CASE : List[str] = Fraction(a_ )
result *= frac.denominator / frac.numerator
return int(a_ )
if __name__ == "__main__":
print(solution())
| 18 | 1 |
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCamelCase__ : Dict = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
lowerCamelCase__ : Dict = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
lowerCamelCase__ : Optional[int] = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
lowerCamelCase__ : Tuple = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__( datasets.Metric ):
'''simple docstring'''
def __lowerCAmelCase ( self :List[str] ) -> List[str]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] , reference_urls=[
'''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''',
'''https://en.wikipedia.org/wiki/METEOR''',
] , )
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Optional[Any] ) -> Any:
'''simple docstring'''
import nltk
nltk.download('''wordnet''' )
if NLTK_VERSION >= version.Version('''3.6.5''' ):
nltk.download('''punkt''' )
if NLTK_VERSION >= version.Version('''3.6.6''' ):
nltk.download('''omw-1.4''' )
def __lowerCAmelCase ( self :str , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[str]=0.9 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :str=0.5 ) -> int:
'''simple docstring'''
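        # meteor_score in NLTK >= 3.6.5 expects pre-tokenized input, hence the
        # word_tokenize branch below; older versions accept raw strings.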
if NLTK_VERSION >= version.Version('''3.6.5''' ):
SCREAMING_SNAKE_CASE : List[Any] = [
meteor_score.single_meteor_score(
word_tokenize(lowerCamelCase_ ) , word_tokenize(lowerCamelCase_ ) , alpha=lowerCamelCase_ , beta=lowerCamelCase_ , gamma=lowerCamelCase_ )
for ref, pred in zip(lowerCamelCase_ , lowerCamelCase_ )
]
else:
SCREAMING_SNAKE_CASE : Dict = [
meteor_score.single_meteor_score(lowerCamelCase_ , lowerCamelCase_ , alpha=lowerCamelCase_ , beta=lowerCamelCase_ , gamma=lowerCamelCase_ )
for ref, pred in zip(lowerCamelCase_ , lowerCamelCase_ )
]
return {"meteor": np.mean(lowerCamelCase_ )}
| 18 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ : int = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase , _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """maskformer-swin"""
UpperCamelCase = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self :Optional[int] , lowerCamelCase_ :List[Any]=2_24 , lowerCamelCase_ :Tuple=4 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :List[str]=96 , lowerCamelCase_ :int=[2, 2, 6, 2] , lowerCamelCase_ :Union[str, Any]=[3, 6, 12, 24] , lowerCamelCase_ :Optional[int]=7 , lowerCamelCase_ :Tuple=4.0 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Dict=0.0 , lowerCamelCase_ :Any=0.0 , lowerCamelCase_ :List[Any]=0.1 , lowerCamelCase_ :Dict="gelu" , lowerCamelCase_ :Optional[int]=False , lowerCamelCase_ :List[str]=0.0_2 , lowerCamelCase_ :Any=1E-5 , lowerCamelCase_ :Union[str, Any]=None , lowerCamelCase_ :List[str]=None , **lowerCamelCase_ :Union[str, Any] , ) -> Dict:
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE : str = num_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = embed_dim
SCREAMING_SNAKE_CASE : List[Any] = depths
SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = num_heads
SCREAMING_SNAKE_CASE : Any = window_size
SCREAMING_SNAKE_CASE : List[str] = mlp_ratio
SCREAMING_SNAKE_CASE : str = qkv_bias
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : int = drop_path_rate
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : Any = use_absolute_embeddings
SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[str] = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE : int = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) )
SCREAMING_SNAKE_CASE : Dict = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(lowerCamelCase_ ) + 1 )]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = get_aligned_output_features_output_indices(
out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names )
| 18 | 1 |
"""simple docstring"""
from __future__ import annotations
def __A ( a_ : int | float | str , a_ : int | float | str )-> list[str]:
'''simple docstring'''
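    # Build the terms of the P-series 1 + 1/2^p + 1/3^p + ... + 1/n^p as display
    # strings; an empty nth_term falls through to [""] above.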
if nth_term == "":
return [""]
SCREAMING_SNAKE_CASE : Dict = int(a_ )
SCREAMING_SNAKE_CASE : Dict = int(a_ )
SCREAMING_SNAKE_CASE : list[str] = []
for temp in range(int(a_ ) ):
series.append(F"1 / {pow(temp + 1 , int(a_ ) )}" if series else '''1''' )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ : Any = int(input("Enter the last number (nth term) of the P-Series"))
lowerCamelCase__ : Optional[Any] = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 18 |
"""simple docstring"""
import math
class lowercase__:
'''simple docstring'''
def __init__( self :Union[str, Any] , lowerCamelCase_ :List[str]=0 ) -> List[Any]: # a graph with Node 0,1,...,N-1
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = n
SCREAMING_SNAKE_CASE : List[Any] = [
[math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ )
] # adjacency matrix for weight
SCREAMING_SNAKE_CASE : Any = [
[math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ )
] # dp[i][j] stores minimum distance from i to j
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = w
def __lowerCAmelCase ( self :str ) -> Union[str, Any]:
'''simple docstring'''
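        # Floyd-Warshall relaxation: for every intermediate node k, keep the
        # cheaper of the direct i->j distance and the i->k->j detour. O(n^3) time.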
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
SCREAMING_SNAKE_CASE : List[str] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return self.dp[u][v]
if __name__ == "__main__":
lowerCamelCase__ : Dict = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 18 | 1 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def __A ( a_ : Optional[int] , a_ : int=False )-> Union[str, Any]:
'''simple docstring'''
try:
SCREAMING_SNAKE_CASE : Optional[Any] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
SCREAMING_SNAKE_CASE : Optional[int] = default
else:
# KEY is set, convert it to True or False.
try:
SCREAMING_SNAKE_CASE : List[str] = strtobool(a_ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
lowerCamelCase__ : int = parse_flag_from_env("RUN_SLOW", default=False)
lowerCamelCase__ : Optional[int] = parse_flag_from_env("RUN_REMOTE", default=False)
lowerCamelCase__ : Optional[int] = parse_flag_from_env("RUN_LOCAL", default=True)
lowerCamelCase__ : str = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
lowerCamelCase__ : Dict = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
lowerCamelCase__ : str = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
lowerCamelCase__ : Tuple = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")
# Audio
lowerCamelCase__ : str = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)
# Beam
lowerCamelCase__ : List[str] = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
reason="test requires apache-beam and a compatible dill version",
)
# Dill-cloudpickle compatibility
lowerCamelCase__ : Any = pytest.mark.skipif(
config.DILL_VERSION <= version.parse("0.3.2"),
reason="test requires dill>0.3.2 for cloudpickle compatibility",
)
# Windows
lowerCamelCase__ : int = pytest.mark.skipif(
sys.platform == "win32",
reason="test should not be run on Windows",
)
def __A ( a_ : Optional[Any] )-> Dict:
'''simple docstring'''
try:
import faiss # noqa
except ImportError:
SCREAMING_SNAKE_CASE : Dict = unittest.skip('''test requires faiss''' )(a_ )
return test_case
def __A ( a_ : Any )-> Optional[int]:
'''simple docstring'''
try:
import regex # noqa
except ImportError:
SCREAMING_SNAKE_CASE : int = unittest.skip('''test requires regex''' )(a_ )
return test_case
def __A ( a_ : List[str] )-> List[str]:
'''simple docstring'''
try:
import elasticsearch # noqa
except ImportError:
SCREAMING_SNAKE_CASE : Optional[int] = unittest.skip('''test requires elasticsearch''' )(a_ )
return test_case
def __A ( a_ : int )-> Tuple:
'''simple docstring'''
try:
import sqlalchemy # noqa
except ImportError:
SCREAMING_SNAKE_CASE : List[str] = unittest.skip('''test requires sqlalchemy''' )(a_ )
return test_case
def __A ( a_ : Optional[Any] )-> int:
'''simple docstring'''
if not config.TORCH_AVAILABLE:
SCREAMING_SNAKE_CASE : List[Any] = unittest.skip('''test requires PyTorch''' )(a_ )
return test_case
def __A ( a_ : int )-> List[Any]:
'''simple docstring'''
if not config.TF_AVAILABLE:
SCREAMING_SNAKE_CASE : Any = unittest.skip('''test requires TensorFlow''' )(a_ )
return test_case
def __A ( a_ : Dict )-> Any:
'''simple docstring'''
if not config.JAX_AVAILABLE:
SCREAMING_SNAKE_CASE : List[str] = unittest.skip('''test requires JAX''' )(a_ )
return test_case
def __A ( a_ : Optional[Any] )-> Any:
'''simple docstring'''
if not config.PIL_AVAILABLE:
SCREAMING_SNAKE_CASE : List[Any] = unittest.skip('''test requires Pillow''' )(a_ )
return test_case
def __A ( a_ : Optional[Any] )-> Tuple:
'''simple docstring'''
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(a_ )
else:
return test_case
def __A ( a_ : Tuple )-> Optional[int]:
'''simple docstring'''
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(a_ )
else:
return test_case
def __A ( a_ : Union[str, Any] )-> int:
'''simple docstring'''
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(a_ )
else:
return test_case
def __A ( a_ : Tuple )-> int:
'''simple docstring'''
def _require_spacy_model(a_ : Optional[int] ):
try:
import spacy # noqa F401
spacy.load(a_ )
except ImportError:
return unittest.skip('''test requires spacy''' )(a_ )
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(a_ ) )(a_ )
else:
return test_case
return _require_spacy_model
def __A ( a_ : str )-> int:
'''simple docstring'''
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''' )(a_ )
else:
return test_case
def __A ( a_ : List[Any] )-> Optional[Any]:
'''simple docstring'''
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''' )(a_ )
else:
return test_case
def __A ( a_ : Dict )-> str:
'''simple docstring'''
if not _run_slow_tests or _run_slow_tests == 0:
SCREAMING_SNAKE_CASE : Union[str, Any] = unittest.skip('''test is slow''' )(a_ )
return test_case
def __A ( a_ : str )-> List[Any]:
'''simple docstring'''
if not _run_local_tests or _run_local_tests == 0:
SCREAMING_SNAKE_CASE : Optional[int] = unittest.skip('''test is local''' )(a_ )
return test_case
def __A ( a_ : str )-> str:
'''simple docstring'''
if not _run_packaged_tests or _run_packaged_tests == 0:
SCREAMING_SNAKE_CASE : int = unittest.skip('''test is packaged''' )(a_ )
return test_case
def __A ( a_ : str )-> Tuple:
'''simple docstring'''
if not _run_remote_tests or _run_remote_tests == 0:
SCREAMING_SNAKE_CASE : Optional[int] = unittest.skip('''test requires remote''' )(a_ )
return test_case
def __A ( *a_ : int )-> Tuple:
'''simple docstring'''
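    # Class decorator: apply every given decorator to each method of the class
    # whose name starts with "test".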
def decorate(cls : Any ):
for name, fn in cls.__dict__.items():
if callable(a_ ) and name.startswith('''test''' ):
for decorator in decorators:
SCREAMING_SNAKE_CASE : Optional[Any] = decorator(a_ )
setattr(cls , a_ , a_ )
return cls
return decorate
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
pass
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = 0
UpperCamelCase = 1
UpperCamelCase = 2
@contextmanager
def __A ( a_ : List[str]=OfflineSimulationMode.CONNECTION_FAILS , a_ : List[Any]=1E-16 )-> str:
'''simple docstring'''
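    # Three offline flavours: requests fail outright, requests hang until their
    # timeout, or datasets is told it is offline via HF_DATASETS_OFFLINE.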
SCREAMING_SNAKE_CASE : List[Any] = requests.Session().request
def timeout_request(a_ : int , a_ : int , a_ : Dict , **a_ : Tuple ):
# Change the url to an invalid url so that the connection hangs
SCREAMING_SNAKE_CASE : Union[str, Any] = '''https://10.255.255.1'''
if kwargs.get('''timeout''' ) is None:
raise RequestWouldHangIndefinitelyError(
F"Tried a call to {url} in offline mode with no timeout set. Please set a timeout." )
SCREAMING_SNAKE_CASE : Dict = timeout
try:
return online_request(a_ , a_ , **a_ )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
SCREAMING_SNAKE_CASE : int = url
SCREAMING_SNAKE_CASE : Any = e.args[0]
SCREAMING_SNAKE_CASE : Optional[int] = (max_retry_error.args[0].replace('''10.255.255.1''' , F"OfflineMock[{url}]" ),)
SCREAMING_SNAKE_CASE : Union[str, Any] = (max_retry_error,)
raise
def raise_connection_error(a_ : Dict , a_ : Dict , **a_ : List[Any] ):
raise requests.ConnectionError('''Offline mode is enabled.''' , request=a_ )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('''requests.Session.send''' , a_ ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired by https://stackoverflow.com/a/904609
with patch('''requests.Session.request''' , a_ ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('''datasets.config.HF_DATASETS_OFFLINE''' , a_ ):
yield
else:
raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
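# Hedged usage sketch of the CONNECTION_FAILS branch above, re-implemented
# inline so it is self-contained: patching requests.Session.send makes any
# HTTP call fail immediately inside the block. The helper name is illustrative.
from contextlib import contextmanager
from unittest.mock import patch

import requests


@contextmanager
def offline_connection_fails():
    def _raise(self, request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=request)

    with patch("requests.Session.send", _raise):
        yield


# with offline_connection_fails():
#     requests.get("https://huggingface.co")  # raises requests.ConnectionError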
@contextmanager
def __A ( *a_ : Optional[int] , **a_ : Any )-> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = str(Path().resolve() )
with tempfile.TemporaryDirectory(*a_ , **a_ ) as tmp_dir:
try:
os.chdir(a_ )
yield
finally:
os.chdir(a_ )
@contextmanager
def __A ( )-> Optional[int]:
'''simple docstring'''
import gc
gc.collect()
SCREAMING_SNAKE_CASE : List[str] = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def __A ( )-> List[str]:
'''simple docstring'''
import gc
gc.collect()
SCREAMING_SNAKE_CASE : Tuple = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def __A ( a_ : Any , a_ : str )-> List[str]:
'''simple docstring'''
return deepcopy(a_ ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(a_ ).integers(0 , 1_00 , 10 ).tolist()
def __A ( a_ : Tuple )-> Tuple:
'''simple docstring'''
import decorator
from requests.exceptions import HTTPError
def _wrapper(a_ : Dict , *a_ : int , **a_ : Tuple ):
try:
return func(*a_ , **a_ )
except HTTPError as err:
if str(a_ ).startswith('''500''' ) or str(a_ ).startswith('''502''' ):
pytest.xfail(str(a_ ) )
raise err
return decorator.decorator(_wrapper , a_ )
class lowercase__:
'''simple docstring'''
def __init__( self :List[str] , lowerCamelCase_ :Any , lowerCamelCase_ :int , lowerCamelCase_ :List[str] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = returncode
SCREAMING_SNAKE_CASE : int = stdout
SCREAMING_SNAKE_CASE : Tuple = stderr
async def __A ( a_ : List[str] , a_ : Optional[int] )-> Tuple:
'''simple docstring'''
while True:
SCREAMING_SNAKE_CASE : int = await stream.readline()
if line:
callback(a_ )
else:
break
async def __A ( a_ : Optional[int] , a_ : Optional[Any]=None , a_ : int=None , a_ : int=None , a_ : List[str]=False , a_ : Optional[Any]=False )-> _RunOutput:
'''simple docstring'''
if echo:
print('''\nRunning: ''' , ''' '''.join(a_ ) )
SCREAMING_SNAKE_CASE : Dict = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=a_ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=a_ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
    # If it starts hanging, we will need to switch to the following code. The problem is that no data
    # will be seen until the process is done, so if it hangs there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : Tuple = []
def tee(a_ : int , a_ : Tuple , a_ : Any , a_ : Optional[Any]="" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = line.decode('''utf-8''' ).rstrip()
sink.append(a_ )
if not quiet:
print(a_ , a_ , file=a_ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda a_ : tee(a_ , a_ , sys.stdout , label='''stdout:''' ) ),
_read_stream(p.stderr , lambda a_ : tee(a_ , a_ , sys.stderr , label='''stderr:''' ) ),
] , timeout=a_ , )
return _RunOutput(await p.wait() , a_ , a_ )
def __A ( a_ : Optional[Any] , a_ : Any=None , a_ : List[str]=None , a_ : Tuple=1_80 , a_ : List[Any]=False , a_ : str=True )-> _RunOutput:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = asyncio.get_event_loop()
SCREAMING_SNAKE_CASE : Any = loop.run_until_complete(
_stream_subprocess(a_ , env=a_ , stdin=a_ , timeout=a_ , quiet=a_ , echo=a_ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = ''' '''.join(a_ )
if result.returncode > 0:
SCREAMING_SNAKE_CASE : List[Any] = '''\n'''.join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"'{cmd_str}' produced no output." )
return result
def __A ( )-> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = os.environ.get('''PYTEST_XDIST_WORKER''' , '''gw0''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = re.sub(r'''^gw''' , '''''' , a_ , 0 , re.M )
return int(a_ )
def __A ( )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = 2_95_00
SCREAMING_SNAKE_CASE : str = pytest_xdist_worker_id()
return port + uniq_delta
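# A short sketch of the idea above, assuming pytest-xdist names its workers
# "gw0", "gw1", ... via the PYTEST_XDIST_WORKER environment variable: give
# each worker its own port so parallel distributed tests do not collide.
import os
import re


def worker_port(base_port: int = 29_500) -> int:
    worker_id = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    return base_port + int(re.sub(r"^gw", "", worker_id))


# worker "gw3" -> 29_503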
| 18 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ : Tuple = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Union[str, Any] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowerCamelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
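# A simplified stand-in for the lazy-import pattern above (not _LazyModule's
# actual implementation): attributes are resolved on first access, so heavy
# optional dependencies such as torch are only imported when actually used.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(f".{self._attr_to_module[attr]}", self.__name__)
        return getattr(module, attr)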
| 18 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__( _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = LEDTokenizer
UpperCamelCase = LEDTokenizerFast
UpperCamelCase = True
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : Optional[int] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
SCREAMING_SNAKE_CASE : List[str] = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
SCREAMING_SNAKE_CASE : Dict = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
SCREAMING_SNAKE_CASE : str = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCamelCase_ ) )
def __lowerCAmelCase ( self :int , **lowerCamelCase_ :Optional[int] ) -> List[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] , **lowerCamelCase_ :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :int ) -> Optional[int]:
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
@cached_property
def __lowerCAmelCase ( self :Optional[Any] ) -> str:
'''simple docstring'''
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def __lowerCAmelCase ( self :List[str] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
SCREAMING_SNAKE_CASE : Any = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE : str = tokenizer(lowerCamelCase_ , max_length=len(lowerCamelCase_ ) , padding=lowerCamelCase_ , return_tensors='''pt''' )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE : List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
@require_torch
def __lowerCAmelCase ( self :Tuple ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE : Tuple = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors='''pt''' )
self.assertIn('''input_ids''' , lowerCamelCase_ )
self.assertIn('''attention_mask''' , lowerCamelCase_ )
self.assertNotIn('''labels''' , lowerCamelCase_ )
self.assertNotIn('''decoder_attention_mask''' , lowerCamelCase_ )
@require_torch
def __lowerCAmelCase ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE : int = tokenizer(text_target=lowerCamelCase_ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(
['''I am a small frog''' * 10_24, '''I am a small frog'''] , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , return_tensors='''pt''' )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 51_22) )
@require_torch
def __lowerCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = ['''A long paragraph for summarization.''']
SCREAMING_SNAKE_CASE : Optional[Any] = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(lowerCamelCase_ , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : Dict = tokenizer(text_target=lowerCamelCase_ , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : str = inputs['''input_ids''']
SCREAMING_SNAKE_CASE : List[str] = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def __lowerCAmelCase ( self :List[str] ) -> Any:
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE : Any = ['''Summary of the text.''', '''Another summary.''']
SCREAMING_SNAKE_CASE : int = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = [[0] * len(lowerCamelCase_ ) for x in encoded_output['''input_ids''']]
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.pad(lowerCamelCase_ )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :int ) -> Optional[int]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : str = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = '''A, <mask> AllenNLP sentence.'''
SCREAMING_SNAKE_CASE : str = tokenizer_r.encode_plus(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.encode_plus(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
SCREAMING_SNAKE_CASE : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
lowerCamelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
lowerCamelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
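# Hedged usage sketch mirroring the global-attention padding check above.
# Running it downloads the checkpoint named in this file; everything else is
# a sketch, not an assertion about the exact padded values.
from transformers import LEDTokenizer

tok = LEDTokenizer.from_pretrained("allenai/led-base-16384")
enc = tok(["Summary of the text.", "Another summary."])  # unequal lengths
enc["global_attention_mask"] = [[0] * len(ids) for ids in enc["input_ids"]]
padded = tok.pad(enc)  # pads input_ids and the global_attention_mask together
assert len(padded["global_attention_mask"][0]) == len(padded["global_attention_mask"][1])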
| 18 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self :List[str] , lowerCamelCase_ :Union[List[ControlNetModel], Tuple[ControlNetModel]] ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[Any] = nn.ModuleList(lowerCamelCase_ )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :Union[torch.Tensor, float, int] , lowerCamelCase_ :torch.Tensor , lowerCamelCase_ :List[torch.tensor] , lowerCamelCase_ :List[float] , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[Dict[str, Any]] = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :bool = True , ) -> Union[ControlNetOutput, Tuple]:
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(lowerCamelCase_ , lowerCamelCase_ , self.nets ) ):
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = controlnet(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
# merge samples
if i == 0:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = down_samples, mid_sample
else:
SCREAMING_SNAKE_CASE : Optional[int] = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowerCamelCase_ , lowerCamelCase_ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Union[str, os.PathLike] , lowerCamelCase_ :bool = True , lowerCamelCase_ :Callable = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[str] = None , ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : Any = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowerCamelCase_ , is_main_process=lowerCamelCase_ , save_function=lowerCamelCase_ , safe_serialization=lowerCamelCase_ , variant=lowerCamelCase_ , )
idx += 1
SCREAMING_SNAKE_CASE : Union[str, Any] = model_path_to_save + f"_{idx}"
@classmethod
def __lowerCAmelCase ( cls :Dict , lowerCamelCase_ :Optional[Union[str, os.PathLike]] , **lowerCamelCase_ :Tuple ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : Optional[int] = []
# load controlnet and append to list until no controlnet directory exists anymore
        # the first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
SCREAMING_SNAKE_CASE : Dict = pretrained_model_path
while os.path.isdir(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Tuple = ControlNetModel.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
controlnets.append(lowerCamelCase_ )
idx += 1
SCREAMING_SNAKE_CASE : Union[str, Any] = pretrained_model_path + f"_{idx}"
logger.info(f"{len(lowerCamelCase_ )} controlnets loaded from {pretrained_model_path}." )
if len(lowerCamelCase_ ) == 0:
raise ValueError(
f"No ControlNets found under {os.path.dirname(lowerCamelCase_ )}. Expected at least {pretrained_model_path + '_0'}." )
return cls(lowerCamelCase_ )
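# A short sketch of the directory convention the save/load methods above rely
# on: the first controlnet lives in `save_directory` itself and each further
# one in a sibling directory with an `_{i}` suffix; from_pretrained probes
# these paths in order with os.path.isdir until one is missing. Paths are
# illustrative.
save_directory = "./mydirectory/controlnet"
expected_paths = [save_directory] + [f"{save_directory}_{i}" for i in range(1, 3)]
# -> ['./mydirectory/controlnet', './mydirectory/controlnet_1', './mydirectory/controlnet_2']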
| 18 | 1 |
"""simple docstring"""
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
lowerCamelCase__ : Dict = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class lowercase__:
'''simple docstring'''
def __init__( self :Optional[Any] , lowerCamelCase_ :int = 14 ) -> None:
'''simple docstring'''
if group not in primes:
raise ValueError('''Unsupported Group''' )
SCREAMING_SNAKE_CASE : Tuple = primes[group]['''prime''']
SCREAMING_SNAKE_CASE : Any = primes[group]['''generator''']
SCREAMING_SNAKE_CASE : Optional[Any] = int(hexlify(urandom(32 ) ) , base=16 )
def __lowerCAmelCase ( self :Optional[Any] ) -> str:
'''simple docstring'''
return hex(self.__private_key )[2:]
def __lowerCAmelCase ( self :Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = pow(self.generator , self.__private_key , self.prime )
return hex(lowerCamelCase_ )[2:]
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :int ) -> bool:
'''simple docstring'''
return (
2 <= key <= self.prime - 2
and pow(lowerCamelCase_ , (self.prime - 1) // 2 , self.prime ) == 1
)
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :str ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = int(lowerCamelCase_ , base=16 )
if not self.is_valid_public_key(lowerCamelCase_ ):
raise ValueError('''Invalid public key''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = pow(lowerCamelCase_ , self.__private_key , self.prime )
return shaaaa(str(lowerCamelCase_ ).encode() ).hexdigest()
@staticmethod
def __lowerCAmelCase ( lowerCamelCase_ :int , lowerCamelCase_ :int ) -> bool:
'''simple docstring'''
return (
2 <= remote_public_key_str <= prime - 2
and pow(lowerCamelCase_ , (prime - 1) // 2 , lowerCamelCase_ ) == 1
)
@staticmethod
def __lowerCAmelCase ( lowerCamelCase_ :str , lowerCamelCase_ :str , lowerCamelCase_ :int = 14 ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = int(lowerCamelCase_ , base=16 )
SCREAMING_SNAKE_CASE : Union[str, Any] = int(lowerCamelCase_ , base=16 )
SCREAMING_SNAKE_CASE : Union[str, Any] = primes[group]['''prime''']
if not DiffieHellman.is_valid_public_key_static(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError('''Invalid public key''' )
SCREAMING_SNAKE_CASE : Tuple = pow(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return shaaaa(str(lowerCamelCase_ ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
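# A minimal, self-contained sketch of the exchange the class above implements,
# using toy parameters (NOT the RFC 3526 groups) so it runs instantly. Like
# the class, it hashes the shared integer's decimal string to derive the key.
from hashlib import sha256

p, g = 23, 5  # toy prime and generator, for illustration only
alice_priv, bob_priv = 6, 15
alice_pub, bob_pub = pow(g, alice_priv, p), pow(g, bob_priv, p)
alice_key = sha256(str(pow(bob_pub, alice_priv, p)).encode()).hexdigest()
bob_key = sha256(str(pow(alice_pub, bob_priv, p)).encode()).hexdigest()
assert alice_key == bob_key  # both sides derive the same shared secret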
| 18 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def __A ( a_ : float , a_ : float , a_ : bool = False )-> list[float]:
'''simple docstring'''
if radian_mode:
return [magnitude * cos(a_ ), magnitude * sin(a_ )]
return [magnitude * cos(radians(a_ ) ), magnitude * sin(radians(a_ ) )]
def __A ( a_ : NDArray[floataa] , a_ : NDArray[floataa] , a_ : float = 10**-1 )-> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE : NDArray[floataa] = cross(a_ , a_ )
SCREAMING_SNAKE_CASE : float = sum(a_ )
return abs(a_ ) < eps
if __name__ == "__main__":
# Test to check if it works
lowerCamelCase__ : Optional[Any] = array(
[
polar_force(7_1_8.4, 180 - 30),
polar_force(8_7_9.5_4, 45),
polar_force(100, -90),
]
)
lowerCamelCase__ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
lowerCamelCase__ : Union[str, Any] = array(
[
polar_force(30 * 9.8_1, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
lowerCamelCase__ : Any = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
lowerCamelCase__ : Union[str, Any] = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
lowerCamelCase__ : Optional[int] = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
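# A self-contained numeric cross-check of the same idea: three equal forces
# 120 degrees apart applied at the origin cancel, so both the net force and
# the net moment vanish (within floating-point tolerance).
from numpy import array, cos, cross, radians, sin

_forces = array(
    [[10 * cos(radians(a)), 10 * sin(radians(a))] for a in (0, 120, 240)]
)
_locations = array([[0, 0], [0, 0], [0, 0]])
assert abs(cross(_locations, _forces).sum()) < 1e-9
assert abs(_forces.sum(axis=0)).max() < 1e-9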
| 18 | 1 |
"""simple docstring"""
from math import pow
def __A ( a_ : int , a_ : int , a_ : int , a_ : int , a_ : int , )-> tuple[int, int]:
'''simple docstring'''
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
SCREAMING_SNAKE_CASE : Dict = int(pow(a_ , a_ ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = backtrack(
a_ , a_ , current_number + 1 , a_ , a_ )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = backtrack(
a_ , a_ , current_number + 1 , a_ , a_ )
return current_sum, solutions_count
def __A ( a_ : int , a_ : int )-> int:
'''simple docstring'''
if not (1 <= needed_sum <= 10_00 and 2 <= power <= 10):
raise ValueError(
'''Invalid input\n'''
'''needed_sum must be between 1 and 1000, power between 2 and 10.''' )
return backtrack(a_ , a_ , 1 , 0 , 0 )[1] # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
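# A brute-force cross-check of the backtracking above: count subsets of
# {1, 2, ...} whose p-th powers sum to the target. For target 100 and power 2
# there are exactly three ways (10^2, 6^2+8^2, 1^2+3^2+4^2+5^2+7^2).
from itertools import combinations


def brute_count(target: int, power: int) -> int:
    base = [i for i in range(1, target + 1) if i**power <= target]
    return sum(
        1
        for r in range(1, len(base) + 1)
        for combo in combinations(base, r)
        if sum(x**power for x in combo) == target
    )


assert brute_count(10, 2) == 1  # only 1^2 + 3^2
assert brute_count(100, 2) == 3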
| 18 |
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. A bigger population can be faster but uses more memory.
lowerCamelCase__ : Optional[Any] = 200
# Number of elements selected in every generation of evolution. Selection keeps
# the best items of each generation, and N_SELECTED must be smaller than N_POPULATION.
lowerCamelCase__ : Optional[int] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase__ : Optional[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def __A ( a_ : str , a_ : str )-> tuple[str, float]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = len([g for position, g in enumerate(a_ ) if g == main_target[position]] )
return (item, float(a_ ))
def __A ( a_ : str , a_ : str )-> tuple[str, str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = random.randint(0 , len(a_ ) - 1 )
SCREAMING_SNAKE_CASE : str = parent_a[:random_slice] + parent_a[random_slice:]
SCREAMING_SNAKE_CASE : Dict = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def __A ( a_ : str , a_ : list[str] )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = list(a_ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
SCREAMING_SNAKE_CASE : Any = random.choice(a_ )
return "".join(a_ )
def __A ( a_ : tuple[str, float] , a_ : list[tuple[str, float]] , a_ : list[str] , )-> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = []
# Generate more children proportionally to the fitness score.
SCREAMING_SNAKE_CASE : List[str] = int(parent_a[1] * 1_00 ) + 1
SCREAMING_SNAKE_CASE : Optional[Any] = 10 if child_n >= 10 else child_n
for _ in range(a_ ):
SCREAMING_SNAKE_CASE : List[str] = population_score[random.randint(0 , a_ )][0]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = crossover(parent_a[0] , a_ )
# Append new string to the population list.
pop.append(mutate(a_ , a_ ) )
pop.append(mutate(a_ , a_ ) )
return pop
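# A tiny, self-contained illustration of the fitness and crossover steps used
# above (values chosen by hand; the real functions pick the slice at random
# and return a raw match count rather than a normalized score).
target = "hello"
item = "hezlo"
fitness = sum(g == target[i] for i, g in enumerate(item)) / len(target)
# fitness == 0.8: four of five positions match the target

parent_a, parent_b = "hexxx", "xxllo"
slice_at = 2
child = parent_a[:slice_at] + parent_b[slice_at:]
# child == "hello": genes before the slice come from one parent, the rest from the other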
def __A ( a_ : str , a_ : list[str] , a_ : bool = True )-> tuple[int, int, str]:
'''simple docstring'''
if N_POPULATION < N_SELECTED:
SCREAMING_SNAKE_CASE : List[Any] = F"{N_POPULATION} must be bigger than {N_SELECTED}"
raise ValueError(a_ )
    # Verify that the target contains no genes besides the ones in the genes list.
SCREAMING_SNAKE_CASE : List[str] = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
SCREAMING_SNAKE_CASE : str = F"{not_in_genes_list} is not in genes list, evolution cannot converge"
raise ValueError(a_ )
# Generate random starting population.
SCREAMING_SNAKE_CASE : Tuple = []
for _ in range(a_ ):
population.append(''''''.join([random.choice(a_ ) for i in range(len(a_ ) )] ) )
    # Just some logs to know what the algorithm is doing.
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(a_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
SCREAMING_SNAKE_CASE : int = [evaluate(a_ , a_ ) for item in population]
# Check if there is a matching evolution.
SCREAMING_SNAKE_CASE : List[Any] = sorted(a_ , key=lambda a_ : x[1] , reverse=a_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"\nGeneration: {generation}"
F"\nTotal Population:{total_population}"
F"\nBest score: {population_score[0][1]}"
F"\nBest string: {population_score[0][0]}" )
# Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
SCREAMING_SNAKE_CASE : Optional[Any] = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(a_ )
# Normalize population score to be between 0 and 1.
SCREAMING_SNAKE_CASE : Optional[int] = [
(item, score / len(a_ )) for item, score in population_score
]
# This is selection
for i in range(a_ ):
population.extend(select(population_score[int(a_ )] , a_ , a_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(a_ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCamelCase__ : Dict = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCamelCase__ : int = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 18 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ : int = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[int] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : int = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
lowerCamelCase__ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18 |
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
lowerCamelCase__ : Optional[Any] = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __A ( a_ : Optional[int] )-> Dict:
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def __A ( a_ : List[Any] , a_ : Optional[int] , a_ : Optional[int] )-> Dict:
'''simple docstring'''
return max(metric_fn(a_ , a_ ) for gt in ground_truths )
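# A quick illustration of the max-over-references pattern above, with exact
# string equality standing in for the imported exact_match_score / fa_score:
prediction, ground_truths = "42", ["forty two", "42"]
best = max(float(prediction == gt) for gt in ground_truths)
# best == 1.0: the prediction matches at least one acceptable answer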
def __A ( a_ : List[Any] , a_ : Union[str, Any] , a_ : str )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Optional[Any] = []
if args.gold_data_mode == "qa":
SCREAMING_SNAKE_CASE : List[Any] = pd.read_csv(a_ , sep='''\t''' , header=a_ )
for answer_list in data[1]:
SCREAMING_SNAKE_CASE : str = ast.literal_eval(a_ )
answers.append(a_ )
else:
SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = [[reference] for reference in references]
SCREAMING_SNAKE_CASE : Dict = 0
for prediction, ground_truths in zip(a_ , a_ ):
total += 1
em += metric_max_over_ground_truths(a_ , a_ , a_ )
fa += metric_max_over_ground_truths(a_ , a_ , a_ )
SCREAMING_SNAKE_CASE : Any = 100.0 * em / total
SCREAMING_SNAKE_CASE : Optional[int] = 100.0 * fa / total
logger.info(F"F1: {fa:.2f}" )
logger.info(F"EM: {em:.2f}" )
def __A ( a_ : Any , a_ : Any , a_ : List[Any] )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = args.k
SCREAMING_SNAKE_CASE : Tuple = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Union[str, Any] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = 0
for hypo, reference in zip(a_ , a_ ):
SCREAMING_SNAKE_CASE : Optional[int] = set(hypo.split('''\t''' )[:k] )
SCREAMING_SNAKE_CASE : List[str] = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
SCREAMING_SNAKE_CASE : Dict = 100.0 * em / total
logger.info(F"Precision@{k}: {em: .2f}" )
def __A ( a_ : Any , a_ : List[str] , a_ : str )-> int:
'''simple docstring'''
def strip_title(a_ : Optional[Any] ):
if title.startswith('''"''' ):
SCREAMING_SNAKE_CASE : Tuple = title[1:]
if title.endswith('''"''' ):
SCREAMING_SNAKE_CASE : Any = title[:-1]
return title
SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ , )['''input_ids'''].to(args.device )
SCREAMING_SNAKE_CASE : Any = rag_model.rag.question_encoder(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = question_enc_outputs[0]
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever(
a_ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE : Any = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
SCREAMING_SNAKE_CASE : Dict = []
for docs in all_docs:
SCREAMING_SNAKE_CASE : List[Any] = [strip_title(a_ ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(a_ ) )
return provenance_strings
def __A ( a_ : List[Any] , a_ : int , a_ : str )-> Tuple:
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ )
SCREAMING_SNAKE_CASE : Dict = inputs_dict.input_ids.to(args.device )
SCREAMING_SNAKE_CASE : Any = inputs_dict.attention_mask.to(args.device )
SCREAMING_SNAKE_CASE : Tuple = rag_model.generate( # rag_model overwrites generate
a_ , attention_mask=a_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=a_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.generator_tokenizer.batch_decode(a_ , skip_special_tokens=a_ )
if args.print_predictions:
for q, a in zip(a_ , a_ ):
logger.info('''Q: {} - A: {}'''.format(a_ , a_ ) )
return answers
def __A ( )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=a_ , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=a_ , choices=['''exact''', '''compressed''', '''legacy'''] , type=a_ , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=a_ , type=a_ , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=a_ , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=a_ , type=a_ , required=a_ , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=a_ , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=a_ , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=a_ , type=a_ , required=a_ , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=a_ , type=a_ , required=a_ , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=a_ , choices=['''qa''', '''ans'''] , help=(
        '''Format of the gold data file.\n'''
        '''qa - a single line in the following format: question [tab] answer_list\n'''
        '''ans - a single line of the gold file contains the expected answer string'''
) , )
parser.add_argument(
'''--predictions_path''' , type=a_ , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
'''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=a_ , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=a_ , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=a_ , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=a_ , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
        '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retrieved while generating.''' , )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
SCREAMING_SNAKE_CASE : Dict = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def __A ( a_ : Optional[Any] )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = {}
if args.model_type is None:
SCREAMING_SNAKE_CASE : List[str] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : List[str] = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs
if args.index_name is not None:
SCREAMING_SNAKE_CASE : Tuple = args.index_name
if args.index_path is not None:
SCREAMING_SNAKE_CASE : List[Any] = args.index_path
else:
SCREAMING_SNAKE_CASE : str = BartForConditionalGeneration
SCREAMING_SNAKE_CASE : Optional[int] = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , a_ )
SCREAMING_SNAKE_CASE : int = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
SCREAMING_SNAKE_CASE : str = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(a_ , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(a_ ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : Dict = RagRetriever.from_pretrained(a_ , **a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class.from_pretrained(a_ , retriever=a_ , **a_ )
model.retriever.init_retrieval()
else:
SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(a_ , **a_ )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
SCREAMING_SNAKE_CASE : Dict = []
for line in tqdm(a_ ):
questions.append(line.strip() )
if len(a_ ) == args.eval_batch_size:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) + '''\n''' )
preds_file.flush()
SCREAMING_SNAKE_CASE : Union[str, Any] = []
if len(a_ ) > 0:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) )
preds_file.flush()
score_fn(a_ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
lowerCamelCase__ : List[str] = get_args()
main(args)
| 18 | 1 |
"""simple docstring"""
import math
import os
import sys
def __A ( a_ : str )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = ''''''
try:
with open(a_ , '''rb''' ) as binary_file:
SCREAMING_SNAKE_CASE : str = binary_file.read()
for dat in data:
SCREAMING_SNAKE_CASE : List[str] = F"{dat:08b}"
result += curr_byte
return result
except OSError:
print('''File not accessible''' )
sys.exit()
def __A ( a_ : dict[str, str] , a_ : str , a_ : int , a_ : str )-> None:
'''simple docstring'''
lexicon.pop(a_ )
SCREAMING_SNAKE_CASE : str = last_match_id
if math.loga(a_ ).is_integer():
for curr_key in lexicon:
SCREAMING_SNAKE_CASE : List[Any] = '''0''' + lexicon[curr_key]
SCREAMING_SNAKE_CASE : List[str] = bin(a_ )[2:]
def __A ( a_ : str )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = {'''0''': '''0''', '''1''': '''1'''}
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = '''''', ''''''
SCREAMING_SNAKE_CASE : List[Any] = len(a_ )
for i in range(len(a_ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
SCREAMING_SNAKE_CASE : Any = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(a_ , a_ , a_ , a_ )
index += 1
SCREAMING_SNAKE_CASE : List[Any] = ''''''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
SCREAMING_SNAKE_CASE : Dict = lexicon[curr_string]
result += last_match_id
return result
def __A ( a_ : str , a_ : str )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = os.path.getsize(a_ )
SCREAMING_SNAKE_CASE : Dict = bin(a_ )[2:]
SCREAMING_SNAKE_CASE : Union[str, Any] = len(a_ )
return "0" * (length_length - 1) + file_length_binary + compressed
def __A ( a_ : str , a_ : str )-> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = 8
try:
with open(a_ , '''wb''' ) as opened_file:
SCREAMING_SNAKE_CASE : Union[str, Any] = [
to_write[i : i + byte_length]
for i in range(0 , len(a_ ) , a_ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('''10000000''' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(a_ , 2 ).to_bytes(1 , byteorder='''big''' ) )
except OSError:
print('''File not accessible''' )
sys.exit()
def __A ( a_ : str , a_ : str )-> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = read_file_binary(a_ )
SCREAMING_SNAKE_CASE : Dict = compress_data(a_ )
SCREAMING_SNAKE_CASE : Any = add_file_length(a_ , a_ )
write_file_binary(a_ , a_ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 18 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {"vocab_file": "vocab.json"}
lowerCamelCase__ : Dict = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
lowerCamelCase__ : Optional[Any] = {"mgp-str": 27}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[int]="[GO]" , lowerCamelCase_ :int="[GO]" , lowerCamelCase_ :str="[s]" , lowerCamelCase_ :Dict="[GO]" , **lowerCamelCase_ :List[str] ) -> Tuple:
'''simple docstring'''
super().__init__(
unk_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , **lowerCamelCase_ , )
with open(lowerCamelCase_ , encoding='''utf-8''' ) as vocab_handle:
SCREAMING_SNAKE_CASE : int = json.load(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in self.vocab.items()}
@property
def __lowerCAmelCase ( self :int ) -> Dict:
'''simple docstring'''
return len(self.vocab )
def __lowerCAmelCase ( self :List[str] ) -> Dict:
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Optional[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for s in text:
char_tokens.extend(lowerCamelCase_ )
return char_tokens
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Tuple ) -> Optional[int]:
'''simple docstring'''
return self.vocab.get(lowerCamelCase_ , self.vocab.get(self.unk_token ) )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Dict ) -> Optional[int]:
'''simple docstring'''
return self.decoder.get(lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(lowerCamelCase_ ) )
return
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + '''\n''' )
return (vocab_file,)
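# A minimal sketch of the character-level scheme this tokenizer implements:
# each character maps to an id through the vocab, falling back to the unknown
# token for out-of-vocabulary characters. Vocab contents are illustrative.
vocab = {"[GO]": 0, "a": 1, "b": 2, "c": 3}
unk_id = vocab["[GO]"]
ids = [vocab.get(ch, unk_id) for ch in "abz"]
# ids == [1, 2, 0]: "z" is out of vocabulary and falls back to [GO]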
| 18 | 1 |
"""simple docstring"""
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
lowerCamelCase__ : List[str] = {
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 48000,
"sample_size": 65536,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 48000,
"sample_size": 65536,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 48000,
"sample_size": 131072,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 16000,
"sample_size": 65536,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 16000,
"sample_size": 65536,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 16000,
"sample_size": 65536,
},
}
def __A ( a_ : List[str] , a_ : Any )-> int:
'''simple docstring'''
return torch.atana(a_ , a_ ) / math.pi * 2
def __A ( a_ : List[str] )-> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = torch.sin(t * math.pi / 2 ) ** 2
SCREAMING_SNAKE_CASE : Tuple = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(a_ , a_ )
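# A self-contained numeric check of the mapping above (plain math instead of
# torch): with alpha = cos(t*pi/2) and sigma = sin(t*pi/2), the expression
# atan2(sigma, alpha) / pi * 2 recovers t up to floating-point error.
import math

t = 0.3
alpha, sigma = math.cos(t * math.pi / 2), math.sin(t * math.pi / 2)
t_back = math.atan2(sigma, alpha) / math.pi * 2
assert abs(t_back - t) < 1e-12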
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
pass
class lowercase__( nn.Module ):
'''simple docstring'''
def __init__( self :Tuple , lowerCamelCase_ :Tuple ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[Any] = DiffusionAttnUnetaD(lowerCamelCase_ , n_attn_layers=4 )
SCREAMING_SNAKE_CASE : int = deepcopy(self.diffusion )
SCREAMING_SNAKE_CASE : List[str] = torch.quasirandom.SobolEngine(1 , scramble=lowerCamelCase_ )
def __A ( a_ : Tuple )-> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = MODELS_MAP[model_name]['''url''']
os.system(F"wget {url} ./" )
return F"./{model_name}.ckpt"
lowerCamelCase__ : str = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
lowerCamelCase__ : Optional[Any] = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
lowerCamelCase__ : Optional[int] = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
lowerCamelCase__ : Union[str, Any] = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
lowerCamelCase__ : Dict = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
lowerCamelCase__ : List[Any] = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def __A ( a_ : Dict )-> Union[str, Any]:
'''simple docstring'''
if name.startswith('''skip''' ):
return name.replace('''skip''' , RES_CONV_MAP['''skip'''] )
# name has to be of format main.{digit}
if not name.startswith('''main.''' ):
raise ValueError(F"ResConvBlock error with {name}" )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def __A ( a_ : str )-> Optional[Any]:
'''simple docstring'''
for key, value in ATTN_MAP.items():
if name.startswith(a_ ) and not isinstance(a_ , a_ ):
return name.replace(a_ , a_ )
elif name.startswith(a_ ):
return [name.replace(a_ , a_ ) for v in value]
raise ValueError(F"Attn error with {name}" )
def __A ( a_ : Dict , a_ : Optional[Any]=13 )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = input_string
if string.split('''.''' )[0] == "timestep_embed":
return string.replace('''timestep_embed''' , '''time_proj''' )
SCREAMING_SNAKE_CASE : Tuple = 0
if string.startswith('''net.3.''' ):
depth += 1
SCREAMING_SNAKE_CASE : Dict = string[6:]
elif string.startswith('''net.''' ):
SCREAMING_SNAKE_CASE : Dict = string[4:]
while string.startswith('''main.7.''' ):
depth += 1
SCREAMING_SNAKE_CASE : List[str] = string[7:]
if string.startswith('''main.''' ):
SCREAMING_SNAKE_CASE : str = string[5:]
# mid block
if string[:2].isdigit():
SCREAMING_SNAKE_CASE : Any = string[:2]
SCREAMING_SNAKE_CASE : List[str] = string[2:]
else:
SCREAMING_SNAKE_CASE : List[str] = string[0]
SCREAMING_SNAKE_CASE : Optional[Any] = string[1:]
if depth == max_depth:
SCREAMING_SNAKE_CASE : Tuple = MID_NUM_TO_LAYER[layer_num]
SCREAMING_SNAKE_CASE : Optional[Any] = '''mid_block'''
elif depth > 0 and int(a_ ) < 7:
SCREAMING_SNAKE_CASE : str = DOWN_NUM_TO_LAYER[layer_num]
SCREAMING_SNAKE_CASE : Any = F"down_blocks.{depth}"
elif depth > 0 and int(a_ ) > 7:
SCREAMING_SNAKE_CASE : Tuple = UP_NUM_TO_LAYER[layer_num]
SCREAMING_SNAKE_CASE : List[Any] = F"up_blocks.{max_depth - depth - 1}"
elif depth == 0:
SCREAMING_SNAKE_CASE : Dict = DEPTH_0_TO_LAYER[layer_num]
SCREAMING_SNAKE_CASE : Any = F"up_blocks.{max_depth - 1}" if int(a_ ) > 3 else '''down_blocks.0'''
if not string_left.startswith('''.''' ):
raise ValueError(F"Naming error with {input_string} and string_left: {string_left}." )
SCREAMING_SNAKE_CASE : int = string_left[1:]
if "resnets" in new_layer:
SCREAMING_SNAKE_CASE : List[str] = convert_resconv_naming(a_ )
elif "attentions" in new_layer:
SCREAMING_SNAKE_CASE : Tuple = convert_attn_naming(a_ )
SCREAMING_SNAKE_CASE : str = new_string_left
if not isinstance(a_ , a_ ):
SCREAMING_SNAKE_CASE : int = prefix + '''.''' + new_layer + '''.''' + string_left
else:
SCREAMING_SNAKE_CASE : List[Any] = [prefix + '''.''' + new_layer + '''.''' + s for s in string_left]
return new_string
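Worked examples for rename() above (illustrative; the call site in rename_orig_weights below uses the original name rename):

#   "timestep_embed.weight" -> "time_proj.weight"   (special-cased first)
# Every other key is routed by two counters: depth (how many "net.3." / "main.7."
# prefixes were stripped) and the leading layer number, which select mid_block,
# f"down_blocks.{depth}" (layer < 7) or f"up_blocks.{max_depth - depth - 1}"
# (layer > 7) before the per-layer resnet/attention renaming is applied.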
def __A ( a_ : Tuple )-> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = {}
for k, v in state_dict.items():
if k.endswith('''kernel''' ):
# up- and downsample layers don't have trainable weights
continue
SCREAMING_SNAKE_CASE : Optional[int] = rename(a_ )
# check if we need to transform from Conv => Linear for attention
if isinstance(a_ , a_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = transform_conv_attns(a_ , a_ , a_ )
else:
SCREAMING_SNAKE_CASE : Dict = v
return new_state_dict
def __A ( a_ : Any , a_ : Tuple , a_ : List[Any] )-> Union[str, Any]:
'''simple docstring'''
if len(a_ ) == 1:
if len(v.shape ) == 3:
# weight
SCREAMING_SNAKE_CASE : Dict = v[:, :, 0]
else:
# bias
SCREAMING_SNAKE_CASE : Dict = v
else:
# qkv matrices
SCREAMING_SNAKE_CASE : List[Any] = v.shape[0]
SCREAMING_SNAKE_CASE : Tuple = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
SCREAMING_SNAKE_CASE : int = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
SCREAMING_SNAKE_CASE : List[str] = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
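transform_conv_attns above splits a fused qkv 1x1-conv weight into three per-projection linear weights. Standalone sketch of that slicing (names are mine):

import torch


def split_qkv_sketch(qkv_weight: torch.Tensor) -> dict:
    tripled, _, _ = qkv_weight.shape  # fused conv kernel of shape (3*C, C, 1)
    single = tripled // 3
    return {
        name: qkv_weight[i * single : (i + 1) * single, :, 0]  # drop the kernel dim
        for i, name in enumerate(["query", "key", "value"])
    }


parts = split_qkv_sketch(torch.randn(3 * 8, 8, 1))
assert all(w.shape == (8, 8) for w in parts.values())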
def __A ( a_ : Dict )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
SCREAMING_SNAKE_CASE : List[Any] = args.model_path.split('''/''' )[-1].split('''.''' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
SCREAMING_SNAKE_CASE : Tuple = download(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = MODELS_MAP[model_name]['''sample_rate''']
SCREAMING_SNAKE_CASE : Optional[Any] = MODELS_MAP[model_name]['''sample_size''']
SCREAMING_SNAKE_CASE : int = Object()
SCREAMING_SNAKE_CASE : int = sample_size
SCREAMING_SNAKE_CASE : Optional[int] = sample_rate
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : Dict = UNetaDModel(sample_size=a_ , sample_rate=a_ )
SCREAMING_SNAKE_CASE : Optional[int] = diffusers_model.state_dict()
SCREAMING_SNAKE_CASE : int = DiffusionUncond(a_ )
orig_model.load_state_dict(torch.load(args.model_path , map_location=a_ )['''state_dict'''] )
SCREAMING_SNAKE_CASE : Tuple = orig_model.diffusion_ema.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = orig_model.state_dict()
SCREAMING_SNAKE_CASE : List[str] = rename_orig_weights(a_ )
SCREAMING_SNAKE_CASE : List[str] = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
SCREAMING_SNAKE_CASE : str = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(a_ ) == 0, F"Problem with {renamed_minus_diffusers}"
assert all(k.endswith('''kernel''' ) for k in list(a_ ) ), F"Problem with {diffusers_minus_renamed}"
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
if key == "time_proj.weight":
SCREAMING_SNAKE_CASE : str = value.squeeze()
SCREAMING_SNAKE_CASE : Dict = value
diffusers_model.load_state_dict(a_ )
SCREAMING_SNAKE_CASE : Tuple = 1_00
SCREAMING_SNAKE_CASE : int = 33
SCREAMING_SNAKE_CASE : Optional[int] = IPNDMScheduler(num_train_timesteps=a_ )
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(a_ )
SCREAMING_SNAKE_CASE : List[str] = torch.randn([1, 2, config.sample_size] , generator=a_ ).to(a_ )
SCREAMING_SNAKE_CASE : Tuple = torch.linspace(1 , 0 , steps + 1 , device=a_ )[:-1]
SCREAMING_SNAKE_CASE : str = get_crash_schedule(a_ )
SCREAMING_SNAKE_CASE : Optional[Any] = DanceDiffusionPipeline(unet=a_ , scheduler=a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(33 )
SCREAMING_SNAKE_CASE : str = pipe(num_inference_steps=a_ , generator=a_ ).audios
SCREAMING_SNAKE_CASE : str = sampling.iplms_sample(a_ , a_ , a_ , {} )
SCREAMING_SNAKE_CASE : int = generated.clamp(-1 , 1 )
SCREAMING_SNAKE_CASE : Dict = (generated - audio).abs().sum()
SCREAMING_SNAKE_CASE : Dict = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('''Diff sum''' , a_ )
print('''Diff max''' , a_ )
assert diff_max < 1E-3, F"Diff max: {diff_max} is too much :-/"
print(F"Conversion for {model_name} successful!" )
if __name__ == "__main__":
lowerCamelCase__ : int = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
lowerCamelCase__ : Dict = parser.parse_args()
main(args)
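One caveat on the CLI above: argparse's type=bool is a footgun, since bool("False") is truthy, so --save False still saves. A sketch of the conventional fix (my suggestion, not what the script does):

#   parser.add_argument("--no_save", action="store_true", help="Skip saving the converted model.")
#   ...
#   if not args.no_save:
#       pipe.save_pretrained(args.checkpoint_path)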
| 18 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """luke"""
def __init__( self :Optional[int] , lowerCamelCase_ :Union[str, Any]=5_02_67 , lowerCamelCase_ :int=50_00_00 , lowerCamelCase_ :Tuple=7_68 , lowerCamelCase_ :List[str]=2_56 , lowerCamelCase_ :Dict=12 , lowerCamelCase_ :Optional[int]=12 , lowerCamelCase_ :Optional[Any]=30_72 , lowerCamelCase_ :List[Any]="gelu" , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :List[str]=5_12 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Tuple=0.0_2 , lowerCamelCase_ :Optional[int]=1E-12 , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :List[str]=None , lowerCamelCase_ :str=1 , lowerCamelCase_ :Any=0 , lowerCamelCase_ :str=2 , **lowerCamelCase_ :List[Any] , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : List[str] = entity_vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = entity_emb_size
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[int] = use_entity_aware_attention
SCREAMING_SNAKE_CASE : List[str] = classifier_dropout
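Minimal usage sketch for the configuration above through the public transformers API (this is LukeConfig with obfuscated names, so the defaults should match the signature):

from transformers import LukeConfig

config = LukeConfig(entity_vocab_size=500_000, use_entity_aware_attention=True)
assert config.model_type == "luke"
assert config.hidden_size == 768  # default from the signature above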
| 18 | 1 |
"""simple docstring"""
def __A ( a_ : list , a_ : int , a_ : int = 0 , a_ : int = 0 )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = right or len(a_ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(a_ , a_ , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
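Usage sketch for the two-ended recursive search above: each call compares both window ends, then shrinks the window by one from each side.

#   search([1, 2, 3, 4], 3)  -> 2
#   search([1, 2, 3, 4], 9)  -> -1   (left eventually crosses right)
# Caveat: the `right or len(list_data) - 1` idiom coerces an explicit right=0 to
# the last index, so a window that genuinely ends at index 0 cannot be expressed.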
| 18 |
"""simple docstring"""
def __A ( a_ : list , a_ : int , a_ : int = 0 , a_ : int = 0 )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = right or len(a_ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(a_ , a_ , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 1 |
"""simple docstring"""
def __A ( a_ : int , a_ : int )-> bool:
'''simple docstring'''
return numa ^ numa < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
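The intended check above is (num1 ^ num2) < 0; the digit-to-letter renaming collapsed both operands into "numa". XOR of two ints is negative exactly when their sign bits differ. Standalone sketch:

def different_signs(num1: int, num2: int) -> bool:
    return (num1 ^ num2) < 0  # sign bit of the XOR is set iff the signs differ


assert different_signs(3, -5) is True
assert different_signs(-3, -5) is False
assert different_signs(3, 5) is False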
| 18 |
"""simple docstring"""
def __A ( a_ : int )-> list[int]:
'''simple docstring'''
if num <= 0:
raise ValueError('''Input must be a positive integer''' )
SCREAMING_SNAKE_CASE : Optional[int] = [True] * (num + 1)
SCREAMING_SNAKE_CASE : Optional[Any] = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , a_ ):
SCREAMING_SNAKE_CASE : Any = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ : str = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
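Worked example for the sieve above: only multiples from p * p upward are crossed off, because smaller multiples were already handled by smaller primes; the overall cost is O(n log log n).

#   prime_sieve_eratosthenes(30)
#   -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]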
| 18 | 1 |
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase__ : int = {
"facebook/mask2former-swin-small-coco-instance": (
"https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """mask2former"""
UpperCamelCase = ["""swin"""]
UpperCamelCase = {"""hidden_size""": """hidden_dim"""}
def __init__( self :Optional[Any] , lowerCamelCase_ :Optional[Dict] = None , lowerCamelCase_ :int = 2_56 , lowerCamelCase_ :int = 2_56 , lowerCamelCase_ :int = 2_56 , lowerCamelCase_ :int = 10_24 , lowerCamelCase_ :str = "relu" , lowerCamelCase_ :int = 6 , lowerCamelCase_ :int = 10 , lowerCamelCase_ :int = 8 , lowerCamelCase_ :float = 0.0 , lowerCamelCase_ :int = 20_48 , lowerCamelCase_ :bool = False , lowerCamelCase_ :bool = False , lowerCamelCase_ :int = 4 , lowerCamelCase_ :int = 2_55 , lowerCamelCase_ :int = 1_00 , lowerCamelCase_ :float = 0.1 , lowerCamelCase_ :float = 2.0 , lowerCamelCase_ :float = 5.0 , lowerCamelCase_ :float = 5.0 , lowerCamelCase_ :int = 1_25_44 , lowerCamelCase_ :float = 3.0 , lowerCamelCase_ :float = 0.7_5 , lowerCamelCase_ :float = 0.0_2 , lowerCamelCase_ :float = 1.0 , lowerCamelCase_ :bool = True , lowerCamelCase_ :List[int] = [4, 8, 16, 32] , lowerCamelCase_ :bool = None , **lowerCamelCase_ :int , ) -> List[str]:
'''simple docstring'''
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.''' )
SCREAMING_SNAKE_CASE : Any = CONFIG_MAPPING['''swin'''](
image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=lowerCamelCase_ , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Any = backbone_config.pop('''model_type''' )
SCREAMING_SNAKE_CASE : str = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE : List[str] = config_class.from_dict(lowerCamelCase_ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
f"Supported model types: {','.join(self.backbones_supported )}" )
SCREAMING_SNAKE_CASE : Any = backbone_config
SCREAMING_SNAKE_CASE : str = feature_size
SCREAMING_SNAKE_CASE : Any = mask_feature_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_dim
SCREAMING_SNAKE_CASE : List[Any] = encoder_feedforward_dim
SCREAMING_SNAKE_CASE : str = activation_function
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_layers
SCREAMING_SNAKE_CASE : str = decoder_layers
SCREAMING_SNAKE_CASE : int = num_attention_heads
SCREAMING_SNAKE_CASE : List[str] = dropout
SCREAMING_SNAKE_CASE : List[Any] = dim_feedforward
SCREAMING_SNAKE_CASE : Any = pre_norm
SCREAMING_SNAKE_CASE : List[str] = enforce_input_projection
SCREAMING_SNAKE_CASE : Dict = common_stride
SCREAMING_SNAKE_CASE : Dict = ignore_value
SCREAMING_SNAKE_CASE : List[str] = num_queries
SCREAMING_SNAKE_CASE : List[str] = no_object_weight
SCREAMING_SNAKE_CASE : List[Any] = class_weight
SCREAMING_SNAKE_CASE : Dict = mask_weight
SCREAMING_SNAKE_CASE : Any = dice_weight
SCREAMING_SNAKE_CASE : Dict = train_num_points
SCREAMING_SNAKE_CASE : Optional[int] = oversample_ratio
SCREAMING_SNAKE_CASE : Dict = importance_sample_ratio
SCREAMING_SNAKE_CASE : Union[str, Any] = init_std
SCREAMING_SNAKE_CASE : List[Any] = init_xavier_std
SCREAMING_SNAKE_CASE : Optional[int] = use_auxiliary_loss
SCREAMING_SNAKE_CASE : List[Any] = feature_strides
SCREAMING_SNAKE_CASE : Dict = output_auxiliary_logits
SCREAMING_SNAKE_CASE : str = decoder_layers
super().__init__(**lowerCamelCase_ )
@classmethod
def __lowerCAmelCase ( cls :int , lowerCamelCase_ :PretrainedConfig , **lowerCamelCase_ :int ) -> Union[str, Any]:
'''simple docstring'''
return cls(
backbone_config=lowerCamelCase_ , **lowerCamelCase_ , )
def __lowerCAmelCase ( self :Optional[Any] ) -> Dict[str, any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : List[str] = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE : str = self.__class__.model_type
return output
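Round-trip sketch for the serialization above, through the public API (this class is Mask2FormerConfig with obfuscated names, so the nested backbone dict should re-inflate on the way back in):

from transformers import Mask2FormerConfig

config = Mask2FormerConfig()  # default Swin backbone is filled in
d = config.to_dict()
assert d["model_type"] == "mask2former"
assert d["backbone_config"]["model_type"] == "swin"
restored = Mask2FormerConfig.from_dict(d)  # the dict branch in __init__ re-parses it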
| 18 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ : Optional[Any] = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[Any] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Dict = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
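The file above is the standard lazy-import module shim: under TYPE_CHECKING the names are imported eagerly so type checkers can resolve them, while at runtime the module is swapped for a _LazyModule that performs the heavy import only on first attribute access (the canonical pattern assigns it to sys.modules[__name__]). Illustrative effect:

#   import transformers.models.funnel as funnel   # cheap: torch/tf are not loaded yet
#   funnel.FunnelModel                            # first access triggers the real import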
| 18 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ : Any = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : List[str] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Dict = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Dict = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18 |
"""simple docstring"""
import os
import sys
lowerCamelCase__ : List[Any] = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowerCamelCase__ : str = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def __A ( *a_ : Any , **a_ : Union[str, Any] )-> Dict:
'''simple docstring'''
return AutoConfig.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __A ( *a_ : str , **a_ : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
return AutoTokenizer.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModel.__doc__ )
def __A ( *a_ : List[str] , **a_ : int )-> Dict:
'''simple docstring'''
return AutoModel.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __A ( *a_ : Any , **a_ : Tuple )-> Dict:
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __A ( *a_ : Dict , **a_ : Optional[Any] )-> Optional[int]:
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __A ( *a_ : Optional[int] , **a_ : str )-> Optional[int]:
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*a_ , **a_ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __A ( *a_ : List[str] , **a_ : int )-> List[Any]:
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*a_ , **a_ )
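The file above mirrors transformers' hubconf.py: thin from_pretrained wrappers for torch.hub to dispatch to, plus the dependency list hub needs. A hedged usage sketch (the upstream entry points are named config/tokenizer/model and so on; the names here are obfuscated away):

#   import torch
#   tokenizer = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#   model = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")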
| 18 | 1 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
lowerCamelCase__ : List[Any] = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
lowerCamelCase__ : int = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
lowerCamelCase__ : Tuple = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__( datasets.Metric ):
'''simple docstring'''
def __lowerCAmelCase ( self :str ) -> Dict:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {'''id''': datasets.Value('''string''' ), '''prediction_text''': datasets.Value('''string''' )},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , reference_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
SCREAMING_SNAKE_CASE : str = [
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
SCREAMING_SNAKE_CASE : List[Any] = evaluate(dataset=lowerCamelCase_ , predictions=lowerCamelCase_ )
return score
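Shape note on _compute above: predictions are flattened into an {id: text} map, while references are re-wrapped into the official SQuAD layout (dataset -> paragraphs -> qas -> answers), because the legacy evaluate() script walks exactly that nesting.

#   predictions -> {"56e10a3be3433e1400422b22": "1976"}
#   references  -> [{"paragraphs": [{"qas": [{"id": ..., "answers": [{"text": ...}, ...]}]}]}]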
| 18 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Any = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """encodec"""
def __init__( self :List[str] , lowerCamelCase_ :Tuple=[1.5, 3.0, 6.0, 1_2.0, 2_4.0] , lowerCamelCase_ :str=2_40_00 , lowerCamelCase_ :Any=1 , lowerCamelCase_ :List[Any]=False , lowerCamelCase_ :Optional[int]=None , lowerCamelCase_ :Optional[Any]=None , lowerCamelCase_ :str=1_28 , lowerCamelCase_ :Any=32 , lowerCamelCase_ :int=1 , lowerCamelCase_ :Dict=[8, 5, 4, 2] , lowerCamelCase_ :List[Any]="weight_norm" , lowerCamelCase_ :Optional[int]=7 , lowerCamelCase_ :Tuple=7 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :int=2 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :Optional[int]="reflect" , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Dict=1.0 , lowerCamelCase_ :Any=10_24 , lowerCamelCase_ :str=None , lowerCamelCase_ :Union[str, Any]=True , **lowerCamelCase_ :Optional[int] , ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = target_bandwidths
SCREAMING_SNAKE_CASE : List[str] = sampling_rate
SCREAMING_SNAKE_CASE : Tuple = audio_channels
SCREAMING_SNAKE_CASE : Tuple = normalize
SCREAMING_SNAKE_CASE : str = chunk_length_s
SCREAMING_SNAKE_CASE : List[str] = overlap
SCREAMING_SNAKE_CASE : int = hidden_size
SCREAMING_SNAKE_CASE : Optional[int] = num_filters
SCREAMING_SNAKE_CASE : Tuple = num_residual_layers
SCREAMING_SNAKE_CASE : List[Any] = upsampling_ratios
SCREAMING_SNAKE_CASE : Optional[int] = norm_type
SCREAMING_SNAKE_CASE : Any = kernel_size
SCREAMING_SNAKE_CASE : Union[str, Any] = last_kernel_size
SCREAMING_SNAKE_CASE : Tuple = residual_kernel_size
SCREAMING_SNAKE_CASE : Any = dilation_growth_rate
SCREAMING_SNAKE_CASE : Optional[int] = use_causal_conv
SCREAMING_SNAKE_CASE : str = pad_mode
SCREAMING_SNAKE_CASE : List[Any] = compress
SCREAMING_SNAKE_CASE : Optional[Any] = num_lstm_layers
SCREAMING_SNAKE_CASE : Dict = trim_right_ratio
SCREAMING_SNAKE_CASE : List[Any] = codebook_size
SCREAMING_SNAKE_CASE : Union[str, Any] = codebook_dim if codebook_dim is not None else hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}" )
super().__init__(**lowerCamelCase_ )
@property
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __lowerCAmelCase ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
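Worked numbers for the derived properties above, using the 24 kHz defaults (upsampling_ratios [8, 5, 4, 2], sampling_rate 24000, top target bandwidth 24.0 kbps):

#   hop_length     = 8 * 5 * 4 * 2 = 320
#   frame_rate     = ceil(24000 / 320) = 75 frames per second
#   num_quantizers = int(1000 * 24.0 // (75 * 10)) = 32 codebooks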
| 18 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """luke"""
def __init__( self :Optional[int] , lowerCamelCase_ :Union[str, Any]=5_02_67 , lowerCamelCase_ :int=50_00_00 , lowerCamelCase_ :Tuple=7_68 , lowerCamelCase_ :List[str]=2_56 , lowerCamelCase_ :Dict=12 , lowerCamelCase_ :Optional[int]=12 , lowerCamelCase_ :Optional[Any]=30_72 , lowerCamelCase_ :List[Any]="gelu" , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :List[str]=5_12 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Tuple=0.0_2 , lowerCamelCase_ :Optional[int]=1E-12 , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :List[str]=None , lowerCamelCase_ :str=1 , lowerCamelCase_ :Any=0 , lowerCamelCase_ :str=2 , **lowerCamelCase_ :List[Any] , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : List[str] = entity_vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = entity_emb_size
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[int] = use_entity_aware_attention
SCREAMING_SNAKE_CASE : List[str] = classifier_dropout
| 18 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowercase__:
'''simple docstring'''
def __init__( self :Tuple , lowerCamelCase_ :Tuple , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :Union[str, Any]=4 , lowerCamelCase_ :List[str]=2 , lowerCamelCase_ :str=7 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Dict=True , lowerCamelCase_ :int=True , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Optional[Any]=99 , lowerCamelCase_ :Any=36 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :str=4 , lowerCamelCase_ :Tuple=37 , lowerCamelCase_ :Optional[int]="gelu" , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :Tuple=5_12 , lowerCamelCase_ :Optional[Any]=16 , lowerCamelCase_ :List[str]=2 , lowerCamelCase_ :Optional[int]=0.0_2 , lowerCamelCase_ :int=6 , lowerCamelCase_ :str=6 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :Union[str, Any]=4 , lowerCamelCase_ :List[Any]=None , lowerCamelCase_ :Tuple=10_00 , ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : List[str] = num_channels
SCREAMING_SNAKE_CASE : str = image_size
SCREAMING_SNAKE_CASE : Optional[int] = patch_size
SCREAMING_SNAKE_CASE : Tuple = text_seq_length
SCREAMING_SNAKE_CASE : Optional[int] = is_training
SCREAMING_SNAKE_CASE : Dict = use_input_mask
SCREAMING_SNAKE_CASE : Any = use_token_type_ids
SCREAMING_SNAKE_CASE : List[Any] = use_labels
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : int = type_vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Dict = coordinate_size
SCREAMING_SNAKE_CASE : List[Any] = shape_size
SCREAMING_SNAKE_CASE : Dict = num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = num_choices
SCREAMING_SNAKE_CASE : List[str] = scope
SCREAMING_SNAKE_CASE : Optional[int] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
SCREAMING_SNAKE_CASE : str = text_seq_length
SCREAMING_SNAKE_CASE : int = (image_size // patch_size) ** 2 + 1
SCREAMING_SNAKE_CASE : Optional[Any] = self.text_seq_length + self.image_seq_length
def __lowerCAmelCase ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE : str = bbox[i, j, 3]
SCREAMING_SNAKE_CASE : List[str] = bbox[i, j, 1]
SCREAMING_SNAKE_CASE : Any = t
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE : Any = bbox[i, j, 2]
SCREAMING_SNAKE_CASE : Any = bbox[i, j, 0]
SCREAMING_SNAKE_CASE : Optional[Any] = t
SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.text_seq_length] )
SCREAMING_SNAKE_CASE : Any = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : List[str] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = LayoutLMvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# text + image
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ , pixel_values=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
SCREAMING_SNAKE_CASE : List[str] = model(pixel_values=lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __lowerCAmelCase ( self :str , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :List[str] , lowerCamelCase_ :Any , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE : Dict = LayoutLMvaForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.num_labels
SCREAMING_SNAKE_CASE : int = LayoutLMvaForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __lowerCAmelCase ( self :Optional[Any] , lowerCamelCase_ :Dict , lowerCamelCase_ :Dict , lowerCamelCase_ :Dict , lowerCamelCase_ :int , lowerCamelCase_ :List[str] , lowerCamelCase_ :int , lowerCamelCase_ :str , lowerCamelCase_ :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = LayoutLMvaForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , pixel_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
        (
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
        ) : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE : Dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
if is_torch_available()
else {}
)
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :str , lowerCamelCase_ :str , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
return True
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = LayoutLMvaModelTester(self )
SCREAMING_SNAKE_CASE : List[Any] = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :List[str] , lowerCamelCase_ :str=False ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = copy.deepcopy(lowerCamelCase_ )
if model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Tuple = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(lowerCamelCase_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
elif model_class in get_values(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
elif model_class in [
*get_values(lowerCamelCase_ ),
]:
SCREAMING_SNAKE_CASE : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
elif model_class in [
*get_values(lowerCamelCase_ ),
]:
SCREAMING_SNAKE_CASE : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowerCamelCase_ , )
return inputs_dict
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE : str = type
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Union[str, Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
def __lowerCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[int] = LayoutLMvaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __A ( )-> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self :str ) -> int:
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase_ ) if is_vision_available() else None
@slow
def __lowerCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.default_image_processor
SCREAMING_SNAKE_CASE : List[Any] = prepare_img()
SCREAMING_SNAKE_CASE : Tuple = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).pixel_values.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[1, 2]] )
SCREAMING_SNAKE_CASE : Dict = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
SCREAMING_SNAKE_CASE : Tuple = model(
input_ids=input_ids.to(lowerCamelCase_ ) , bbox=bbox.to(lowerCamelCase_ ) , pixel_values=pixel_values.to(lowerCamelCase_ ) , )
# verify the logits
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 1_99, 7_68) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase_ , atol=1E-4 ) )
| 18 | 1 |
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
),
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """xlm-prophetnet"""
UpperCamelCase = ["""past_key_values"""]
UpperCamelCase = {
"""num_attention_heads""": """num_encoder_attention_heads""",
}
def __init__( self :List[Any] , lowerCamelCase_ :Optional[float] = 0.1 , lowerCamelCase_ :Optional[Union[str, Callable]] = "gelu" , lowerCamelCase_ :Optional[int] = 3_05_22 , lowerCamelCase_ :Optional[int] = 10_24 , lowerCamelCase_ :Optional[int] = 40_96 , lowerCamelCase_ :Optional[int] = 12 , lowerCamelCase_ :Optional[int] = 16 , lowerCamelCase_ :Optional[int] = 40_96 , lowerCamelCase_ :Optional[int] = 12 , lowerCamelCase_ :Optional[int] = 16 , lowerCamelCase_ :Optional[float] = 0.1 , lowerCamelCase_ :Optional[float] = 0.1 , lowerCamelCase_ :Optional[int] = 5_12 , lowerCamelCase_ :Optional[float] = 0.0_2 , lowerCamelCase_ :Optional[bool] = True , lowerCamelCase_ :Optional[bool] = True , lowerCamelCase_ :Optional[int] = 0 , lowerCamelCase_ :Optional[int] = 2 , lowerCamelCase_ :Optional[int] = 32 , lowerCamelCase_ :Optional[int] = 1_28 , lowerCamelCase_ :Optional[bool] = False , lowerCamelCase_ :Optional[float] = 0.0 , lowerCamelCase_ :Optional[bool] = True , lowerCamelCase_ :Optional[int] = 0 , lowerCamelCase_ :Optional[int] = 1 , lowerCamelCase_ :Optional[int] = 2 , **lowerCamelCase_ :List[Any] , ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : List[Any] = encoder_ffn_dim
SCREAMING_SNAKE_CASE : Dict = num_encoder_layers
SCREAMING_SNAKE_CASE : Tuple = num_encoder_attention_heads
SCREAMING_SNAKE_CASE : Any = decoder_ffn_dim
SCREAMING_SNAKE_CASE : Optional[Any] = num_decoder_layers
SCREAMING_SNAKE_CASE : List[str] = num_decoder_attention_heads
SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE : str = init_std # Normal(0, this parameter)
SCREAMING_SNAKE_CASE : List[str] = activation_function
# parameters for xlmprophetnet
SCREAMING_SNAKE_CASE : Any = ngram
SCREAMING_SNAKE_CASE : Optional[Any] = num_buckets
SCREAMING_SNAKE_CASE : int = relative_max_distance
SCREAMING_SNAKE_CASE : Optional[int] = disable_ngram_loss
SCREAMING_SNAKE_CASE : Optional[int] = eps
# 3 Types of Dropout
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_dropout
SCREAMING_SNAKE_CASE : int = activation_dropout
SCREAMING_SNAKE_CASE : int = dropout
SCREAMING_SNAKE_CASE : Union[str, Any] = use_cache
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , add_cross_attention=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
@property
def __lowerCAmelCase ( self :List[Any] ) -> int:
'''simple docstring'''
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :Optional[Any] ) -> List[Any]:
'''simple docstring'''
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'''
''' `num_decoder_layers`.''' )
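Note on the property pair above: num_hidden_layers is derived as encoder plus decoder layers (12 + 12 = 24 with the defaults) and is deliberately read-only; set num_encoder_layers and num_decoder_layers instead, as the setter's error message says.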
| 18 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
lowerCamelCase__ : Any = logging.getLogger(__name__)
@dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = field(
default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
UpperCamelCase = field(
default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , )
UpperCamelCase = field(
default=10_24 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of prediction examples to this """
"""value if set."""
)
} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """A csv or a json file containing the training data."""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """A csv or a json file containing the validation data."""} )
UpperCamelCase = field(default=_UpperCAmelCase , metadata={"""help""": """A csv or a json file containing the test data."""} )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
SCREAMING_SNAKE_CASE : Optional[int] = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
UpperCamelCase = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def __A ( )-> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
SCREAMING_SNAKE_CASE : Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(a_ )
datasets.utils.logging.set_verbosity(a_ )
transformers.utils.logging.set_verbosity(a_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE : Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
SCREAMING_SNAKE_CASE : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
SCREAMING_SNAKE_CASE : Any = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
SCREAMING_SNAKE_CASE : List[Any] = data_args.train_file.split('''.''' )[-1]
SCREAMING_SNAKE_CASE : Optional[int] = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
SCREAMING_SNAKE_CASE : str = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(F"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
SCREAMING_SNAKE_CASE : int = load_dataset('''csv''' , data_files=a_ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
SCREAMING_SNAKE_CASE : Tuple = load_dataset('''json''' , data_files=a_ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
SCREAMING_SNAKE_CASE : str = raw_datasets['''train'''].features['''label'''].names
SCREAMING_SNAKE_CASE : Union[str, Any] = len(a_ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=a_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
SCREAMING_SNAKE_CASE : Dict = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=a_ , )
SCREAMING_SNAKE_CASE : List[Any] = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=a_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
SCREAMING_SNAKE_CASE : Tuple = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
SCREAMING_SNAKE_CASE : Optional[Any] = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
SCREAMING_SNAKE_CASE : Tuple = {'''Refused''': 0, '''Entailed''': 1}
SCREAMING_SNAKE_CASE : List[Any] = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
SCREAMING_SNAKE_CASE : Optional[int] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(a_ : str ):
# Tokenize the texts
def _convert_table_text_to_pandas(a_ : List[Any] ):
SCREAMING_SNAKE_CASE : List[Any] = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
SCREAMING_SNAKE_CASE : Dict = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
SCREAMING_SNAKE_CASE : List[Any] = examples['''statement''']
SCREAMING_SNAKE_CASE : Optional[int] = list(map(_convert_table_text_to_pandas , examples['''table_text'''] ) )
SCREAMING_SNAKE_CASE : Any = tokenizer(a_ , a_ , padding=a_ , max_length=a_ , truncation=a_ )
SCREAMING_SNAKE_CASE : List[Any] = examples['''label''']
return result
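    # Illustrative note (the example values are made up): the tab_fact `table_text`
    # field encodes a table as newline-separated rows of '#'-separated cells, with
    # the first row holding the header, e.g.
    #     "city#population\nparis#2.1m\nberlin#3.6m"
    # which the helper above turns into a two-row pandas DataFrame with columns
    # ["city", "population"].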
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
SCREAMING_SNAKE_CASE : Optional[Any] = raw_datasets.map(
a_ , batched=a_ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on dataset''' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
SCREAMING_SNAKE_CASE : List[str] = raw_datasets['''train''']
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE : Any = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
SCREAMING_SNAKE_CASE : List[str] = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
SCREAMING_SNAKE_CASE : Tuple = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
SCREAMING_SNAKE_CASE : Optional[int] = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(a_ ) ) , 3 ):
logger.info(F"Sample {index} of the training set: {train_dataset[index]}." )
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary mapping strings to floats.
def compute_metrics(a_ : EvalPrediction ):
SCREAMING_SNAKE_CASE : str = p.predictions[0] if isinstance(p.predictions , a_ ) else p.predictions
SCREAMING_SNAKE_CASE : Tuple = np.argmax(a_ , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
SCREAMING_SNAKE_CASE : Tuple = default_data_collator
elif training_args.fpaa:
SCREAMING_SNAKE_CASE : Union[str, Any] = DataCollatorWithPadding(a_ , pad_to_multiple_of=8 )
else:
SCREAMING_SNAKE_CASE : List[Any] = None
# Initialize our Trainer
SCREAMING_SNAKE_CASE : Optional[Any] = Trainer(
model=a_ , args=a_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=a_ , tokenizer=a_ , data_collator=a_ , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE : List[str] = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE : str = last_checkpoint
SCREAMING_SNAKE_CASE : str = trainer.train(resume_from_checkpoint=a_ )
SCREAMING_SNAKE_CASE : Optional[int] = train_result.metrics
SCREAMING_SNAKE_CASE : int = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(a_ )
)
SCREAMING_SNAKE_CASE : Optional[int] = min(a_ , len(a_ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , a_ )
trainer.save_metrics('''train''' , a_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
SCREAMING_SNAKE_CASE : Tuple = trainer.evaluate(eval_dataset=a_ )
SCREAMING_SNAKE_CASE : str = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = min(a_ , len(a_ ) )
trainer.log_metrics('''eval''' , a_ )
trainer.save_metrics('''eval''' , a_ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
        # Removing the `label` column because it contains -1 and Trainer won't like that.
SCREAMING_SNAKE_CASE : Optional[Any] = predict_dataset.remove_columns('''label''' )
SCREAMING_SNAKE_CASE : Optional[Any] = trainer.predict(a_ , metric_key_prefix='''predict''' ).predictions
SCREAMING_SNAKE_CASE : Union[str, Any] = np.argmax(a_ , axis=1 )
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(a_ , '''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(a_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = label_list[item]
writer.write(F"{index}\t{item}\n" )
SCREAMING_SNAKE_CASE : Optional[int] = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**a_ )
else:
trainer.create_model_card(**a_ )
def __A ( a_ : List[str] )-> int:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 18 | 1 |
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self :List[str] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Tuple=None , lowerCamelCase_ :Optional[Any]=True , lowerCamelCase_ :str=None , **lowerCamelCase_ :Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = parent
SCREAMING_SNAKE_CASE : int = config_class
SCREAMING_SNAKE_CASE : Dict = has_text_modality
SCREAMING_SNAKE_CASE : Optional[Any] = kwargs
SCREAMING_SNAKE_CASE : int = common_properties
def __lowerCAmelCase ( self :str ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.config_class(**self.inputs_dict )
SCREAMING_SNAKE_CASE : Tuple = (
['''hidden_size''', '''num_attention_heads''', '''num_hidden_layers''']
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(['''vocab_size'''] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) , msg=f"`{prop}` does not exist" )
        # Test that config has the common properties as setters
for idx, name in enumerate(lowerCamelCase_ ):
try:
setattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
self.parent.assertEqual(
getattr(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ , msg=f"`{name} value {idx} expected, but was {getattr(lowerCamelCase_ , lowerCamelCase_ )}" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(lowerCamelCase_ ):
try:
SCREAMING_SNAKE_CASE : Dict = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ , msg=f"`{name} value {idx} expected, but was {getattr(lowerCamelCase_ , lowerCamelCase_ )}" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def __lowerCAmelCase ( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.config_class(**self.inputs_dict )
SCREAMING_SNAKE_CASE : int = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(lowerCamelCase_ , '''config.json''' )
config_first.to_json_file(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.config_class.from_json_file(lowerCamelCase_ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def __lowerCAmelCase ( self :Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = self.config_class.from_pretrained(lowerCamelCase_ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def __lowerCAmelCase ( self :Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.config_class(**self.inputs_dict )
SCREAMING_SNAKE_CASE : int = '''test'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Dict = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
config_first.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.config_class.from_pretrained(lowerCamelCase_ , subfolder=lowerCamelCase_ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def __lowerCAmelCase ( self :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
SCREAMING_SNAKE_CASE : Union[str, Any] = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def __lowerCAmelCase ( self :List[Any] ) -> str:
'''simple docstring'''
if self.config_class.is_composition:
return
SCREAMING_SNAKE_CASE : Any = self.config_class()
self.parent.assertIsNotNone(lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = copy.deepcopy(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = self.config_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(('''torch_dtype''', config.torch_dtype, torch.floataa) )
elif getattr(lowerCamelCase_ , lowerCamelCase_ ) != value:
wrong_values.append((key, getattr(lowerCamelCase_ , lowerCamelCase_ ), value) )
if len(lowerCamelCase_ ) > 0:
SCREAMING_SNAKE_CASE : str = '''\n'''.join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] )
raise ValueError(f"The following keys were not properly set in the config:\n{errors}" )
def __lowerCAmelCase ( self :Tuple ) -> Any:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
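# Illustrative wiring in a model test file (names below are placeholders): a test
# case builds the tester in `setUp`, e.g.
#
#     self.config_tester = lowercase__(self, config_class=MyConfig, hidden_size=37)
#
# and then invokes the final method above, which bundles every individual check
# (in the upstream utility this bundle is called `run_common_tests`).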
| 18 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase__:
'''simple docstring'''
def __init__( self :str , lowerCamelCase_ :List[Any] , lowerCamelCase_ :Any=13 , lowerCamelCase_ :Any=32 , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Any=3 , lowerCamelCase_ :Union[str, Any]=16 , lowerCamelCase_ :int=[1, 2, 1] , lowerCamelCase_ :str=[2, 2, 4] , lowerCamelCase_ :str=2 , lowerCamelCase_ :Tuple=2.0 , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :str=0.0 , lowerCamelCase_ :Optional[int]=0.0 , lowerCamelCase_ :Dict=0.1 , lowerCamelCase_ :Union[str, Any]="gelu" , lowerCamelCase_ :str=False , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :int=0.0_2 , lowerCamelCase_ :List[Any]=1E-5 , lowerCamelCase_ :int=True , lowerCamelCase_ :str=None , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Union[str, Any]=10 , lowerCamelCase_ :List[Any]=8 , ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : Any = embed_dim
SCREAMING_SNAKE_CASE : int = depths
SCREAMING_SNAKE_CASE : List[str] = num_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = window_size
SCREAMING_SNAKE_CASE : Optional[Any] = mlp_ratio
SCREAMING_SNAKE_CASE : List[Any] = qkv_bias
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = drop_path_rate
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = use_absolute_embeddings
SCREAMING_SNAKE_CASE : Any = patch_norm
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Any = is_training
SCREAMING_SNAKE_CASE : List[Any] = scope
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : Optional[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_stride
def __lowerCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self :int ) -> int:
'''simple docstring'''
return SwinvaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = SwinvaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
SCREAMING_SNAKE_CASE : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
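        # e.g. with the tester defaults (image_size=32, patch_size=2, depths=[1, 2, 1],
        # embed_dim=16): expected_seq_len = 16 ** 2 // 4 ** 2 = 16 and expected_dim = 16 * 4 = 64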
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __lowerCAmelCase ( self :str , lowerCamelCase_ :str , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = SwinvaForMaskedImageModeling(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE : Tuple = 1
SCREAMING_SNAKE_CASE : List[Any] = SwinvaForMaskedImageModeling(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :int ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = SwinvaForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
UpperCamelCase = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __lowerCAmelCase ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = SwinvaModelTester(self )
SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=lowerCamelCase_ , embed_dim=37 )
def __lowerCAmelCase ( self :Dict ) -> List[str]:
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def __lowerCAmelCase ( self :List[Any] ) -> Any:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[Any] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , nn.Linear ) )
def __lowerCAmelCase ( self :int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Optional[int] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.attentions
SCREAMING_SNAKE_CASE : Tuple = len(self.model_tester.depths )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Optional[int] = config.window_size**2
SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Dict = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
SCREAMING_SNAKE_CASE : Dict = len(lowerCamelCase_ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : int = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
if hasattr(self.model_tester , '''num_hidden_states_types''' ):
SCREAMING_SNAKE_CASE : Any = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
SCREAMING_SNAKE_CASE : Optional[Any] = 2
self.assertEqual(out_len + added_hidden_states , len(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :int , lowerCamelCase_ :Any , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE : str = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
# Swinv2 has a different seq_length
SCREAMING_SNAKE_CASE : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
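        # e.g. with the tester defaults (image_size=32, patch_size=2) this checks
        # num_patches = (32 // 2) * (32 // 2) = 256 tokens of width embed_dim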
SCREAMING_SNAKE_CASE : Any = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = reshaped_hidden_states[0].shape
SCREAMING_SNAKE_CASE : Optional[int] = (
reshaped_hidden_states[0].view(lowerCamelCase_ , lowerCamelCase_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : List[str] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def __lowerCAmelCase ( self :str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[Any] = 3
SCREAMING_SNAKE_CASE : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
SCREAMING_SNAKE_CASE : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
SCREAMING_SNAKE_CASE : Optional[int] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Optional[Any] = True
self.check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , (padded_height, padded_width) )
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Tuple = SwinvaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = _config_zero_init(lowerCamelCase_ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = model_class(config=lowerCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self :Dict ) -> List[Any]:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE : List[str] = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**lowerCamelCase_ )
# verify the logits
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) )
| 18 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] ) -> Dict:
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__( self :Dict , lowerCamelCase_ :int = 1 , lowerCamelCase_ :int = 1_00 , lowerCamelCase_ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase_ :Optional[float] = None , lowerCamelCase_ :bool = True , ) -> Union[AudioPipelineOutput, Tuple]:
'''simple docstring'''
if audio_length_in_s is None:
SCREAMING_SNAKE_CASE : Dict = self.unet.config.sample_size / self.unet.config.sample_rate
SCREAMING_SNAKE_CASE : Tuple = audio_length_in_s * self.unet.config.sample_rate
SCREAMING_SNAKE_CASE : Any = 2 ** len(self.unet.up_blocks )
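        # Illustrative numbers (assuming each up block doubles the temporal
        # resolution): with 3 up blocks and a 16 kHz sample rate, down_scale_factor = 8
        # and requests shorter than 3 * 8 / 16000 = 0.0015 s are rejected below.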
if sample_size < 3 * down_scale_factor:
raise ValueError(
f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
f" {3 * down_scale_factor / self.unet.config.sample_rate}." )
SCREAMING_SNAKE_CASE : Optional[int] = int(lowerCamelCase_ )
if sample_size % down_scale_factor != 0:
SCREAMING_SNAKE_CASE : Optional[Any] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
''' process.''' )
SCREAMING_SNAKE_CASE : List[str] = int(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = next(iter(self.unet.parameters() ) ).dtype
SCREAMING_SNAKE_CASE : List[Any] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and len(lowerCamelCase_ ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(lowerCamelCase_ )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
SCREAMING_SNAKE_CASE : Tuple = randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ , device=self.device , dtype=lowerCamelCase_ )
# set step values
self.scheduler.set_timesteps(lowerCamelCase_ , device=audio.device )
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.timesteps.to(lowerCamelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
SCREAMING_SNAKE_CASE : Dict = self.unet(lowerCamelCase_ , lowerCamelCase_ ).sample
            # 2. compute previous audio sample: x_t -> x_t-1
SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : str = audio.clamp(-1 , 1 ).float().cpu().numpy()
SCREAMING_SNAKE_CASE : Any = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowerCamelCase_ )
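# Minimal usage sketch (the checkpoint id is a placeholder; any unconditional audio
# diffusion checkpoint with a matching UNet/scheduler pair should work):
#
#     pipe = lowercase__.from_pretrained("my-org/my-audio-model")
#     output = pipe(batch_size=1, num_inference_steps=100)
#     audio = output.audios[0]  # numpy array of shape (channels, samples)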
| 18 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """blenderbot-small"""
UpperCamelCase = ["""past_key_values"""]
UpperCamelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self :Any , lowerCamelCase_ :Dict=5_02_65 , lowerCamelCase_ :str=5_12 , lowerCamelCase_ :Tuple=8 , lowerCamelCase_ :int=20_48 , lowerCamelCase_ :str=16 , lowerCamelCase_ :Optional[int]=8 , lowerCamelCase_ :str=20_48 , lowerCamelCase_ :Optional[Any]=16 , lowerCamelCase_ :Union[str, Any]=0.0 , lowerCamelCase_ :List[str]=0.0 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :int="gelu" , lowerCamelCase_ :Tuple=5_12 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :int=0.0 , lowerCamelCase_ :Tuple=0.0 , lowerCamelCase_ :Optional[int]=0.0_2 , lowerCamelCase_ :Union[str, Any]=1 , lowerCamelCase_ :Dict=False , lowerCamelCase_ :Optional[int]=0 , lowerCamelCase_ :List[Any]=1 , lowerCamelCase_ :Any=2 , lowerCamelCase_ :Optional[Any]=2 , **lowerCamelCase_ :Dict , ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[Any] = d_model
SCREAMING_SNAKE_CASE : Dict = encoder_ffn_dim
SCREAMING_SNAKE_CASE : Tuple = encoder_layers
SCREAMING_SNAKE_CASE : Dict = encoder_attention_heads
SCREAMING_SNAKE_CASE : Any = decoder_ffn_dim
SCREAMING_SNAKE_CASE : str = decoder_layers
SCREAMING_SNAKE_CASE : str = decoder_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = dropout
SCREAMING_SNAKE_CASE : Optional[Any] = attention_dropout
SCREAMING_SNAKE_CASE : Any = activation_dropout
SCREAMING_SNAKE_CASE : List[str] = activation_function
SCREAMING_SNAKE_CASE : Optional[int] = init_std
SCREAMING_SNAKE_CASE : List[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_layerdrop
SCREAMING_SNAKE_CASE : List[Any] = use_cache
SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_layers
SCREAMING_SNAKE_CASE : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , forced_eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self :Any ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch'''}
SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase_ , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self.num_layers
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : int = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE : List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE : Any = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def __lowerCAmelCase ( self :List[str] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Any = super().outputs
else:
SCREAMING_SNAKE_CASE : Tuple = super(lowerCamelCase_ , self ).outputs
if self.use_past:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_layers
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE : str = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def __lowerCAmelCase ( self :int , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Generate decoder inputs
SCREAMING_SNAKE_CASE : Optional[int] = seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE : str = dict(**lowerCamelCase_ , **lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = common_inputs['''input_ids'''].shape
SCREAMING_SNAKE_CASE : str = common_inputs['''decoder_input_ids'''].shape[1]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.num_attention_heads
SCREAMING_SNAKE_CASE : str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Optional[Any] = decoder_seq_length + 3
SCREAMING_SNAKE_CASE : int = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
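            # e.g. with the defaults in this config (d_model=512, 16 attention heads)
            # every past key/value tensor has head dimension 512 // 16 = 32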
SCREAMING_SNAKE_CASE : List[Any] = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(lowerCamelCase_ , lowerCamelCase_ )] , dim=1 )
SCREAMING_SNAKE_CASE : Optional[int] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.num_layers
SCREAMING_SNAKE_CASE : int = min(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = max(lowerCamelCase_ , lowerCamelCase_ ) - min_num_layers
SCREAMING_SNAKE_CASE : Tuple = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(lowerCamelCase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
torch.zeros(lowerCamelCase_ ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE : int = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(lowerCamelCase_ , lowerCamelCase_ ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) )
return common_inputs
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE : List[str] = seqlen + 2
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = self.num_layers
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self.num_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Tuple = common_inputs['''attention_mask'''].dtype
SCREAMING_SNAKE_CASE : Any = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(lowerCamelCase_ , lowerCamelCase_ , dtype=lowerCamelCase_ )] , dim=1 )
SCREAMING_SNAKE_CASE : Optional[int] = [
(torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) for _ in range(lowerCamelCase_ )
]
return common_inputs
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : int = tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Tuple = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE : Any = dict(tokenizer(lowerCamelCase_ , return_tensors=lowerCamelCase_ ) )
return common_inputs
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :PreTrainedTokenizer , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
elif self.task == "causal-lm":
SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
return common_inputs
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :int , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Dict ) -> List[Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Optional[Any] = super()._flatten_past_key_values_(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Tuple = super(lowerCamelCase_ , self )._flatten_past_key_values_(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
| 18 | 1 |
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
lowerCamelCase__ : Optional[int] = logging.getLogger(__name__)
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """masked_bert"""
def __init__( self :Dict , lowerCamelCase_ :int=3_05_22 , lowerCamelCase_ :Any=7_68 , lowerCamelCase_ :Optional[int]=12 , lowerCamelCase_ :List[Any]=12 , lowerCamelCase_ :List[str]=30_72 , lowerCamelCase_ :Dict="gelu" , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :Tuple=5_12 , lowerCamelCase_ :int=2 , lowerCamelCase_ :Optional[int]=0.0_2 , lowerCamelCase_ :List[Any]=1E-12 , lowerCamelCase_ :str=0 , lowerCamelCase_ :Dict="topK" , lowerCamelCase_ :List[Any]="constant" , lowerCamelCase_ :Optional[Any]=0.0 , **lowerCamelCase_ :Any , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Dict = hidden_size
SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE : Any = type_vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE : Optional[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[Any] = pruning_method
SCREAMING_SNAKE_CASE : List[str] = mask_init
SCREAMING_SNAKE_CASE : int = mask_scale
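# Illustrative instantiation (the values are examples, not recommendations):
#
#     config = lowercase__(pruning_method="topK", mask_init="constant", mask_scale=0.0)
#
# `pruning_method` selects the weight-masking strategy of the masked layers, while
# `mask_init` and `mask_scale` control how the learned score masks are initialized.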
| 18 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCamelCase__ : Dict = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """layoutlmv3"""
def __init__( self :str , lowerCamelCase_ :Optional[Any]=5_02_65 , lowerCamelCase_ :Dict=7_68 , lowerCamelCase_ :Union[str, Any]=12 , lowerCamelCase_ :Optional[Any]=12 , lowerCamelCase_ :Union[str, Any]=30_72 , lowerCamelCase_ :Any="gelu" , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Any=5_12 , lowerCamelCase_ :int=2 , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :Optional[int]=1E-5 , lowerCamelCase_ :Dict=1 , lowerCamelCase_ :int=0 , lowerCamelCase_ :Tuple=2 , lowerCamelCase_ :List[str]=10_24 , lowerCamelCase_ :Tuple=1_28 , lowerCamelCase_ :Any=1_28 , lowerCamelCase_ :Optional[Any]=True , lowerCamelCase_ :str=32 , lowerCamelCase_ :int=1_28 , lowerCamelCase_ :int=64 , lowerCamelCase_ :List[Any]=2_56 , lowerCamelCase_ :Any=True , lowerCamelCase_ :str=True , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :List[str]=2_24 , lowerCamelCase_ :Dict=3 , lowerCamelCase_ :Union[str, Any]=16 , lowerCamelCase_ :Any=None , **lowerCamelCase_ :Optional[Any] , ) -> int:
'''simple docstring'''
super().__init__(
vocab_size=lowerCamelCase_ , hidden_size=lowerCamelCase_ , num_hidden_layers=lowerCamelCase_ , num_attention_heads=lowerCamelCase_ , intermediate_size=lowerCamelCase_ , hidden_act=lowerCamelCase_ , hidden_dropout_prob=lowerCamelCase_ , attention_probs_dropout_prob=lowerCamelCase_ , max_position_embeddings=lowerCamelCase_ , type_vocab_size=lowerCamelCase_ , initializer_range=lowerCamelCase_ , layer_norm_eps=lowerCamelCase_ , pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Optional[Any] = max_ad_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = coordinate_size
SCREAMING_SNAKE_CASE : Tuple = shape_size
SCREAMING_SNAKE_CASE : Optional[int] = has_relative_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_pos_bins
SCREAMING_SNAKE_CASE : int = max_rel_pos
SCREAMING_SNAKE_CASE : Any = has_spatial_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_ad_pos_bins
SCREAMING_SNAKE_CASE : Dict = max_rel_ad_pos
SCREAMING_SNAKE_CASE : Optional[int] = text_embed
SCREAMING_SNAKE_CASE : Any = visual_embed
SCREAMING_SNAKE_CASE : Any = input_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : List[str] = patch_size
SCREAMING_SNAKE_CASE : str = classifier_dropout
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = version.parse("""1.12""" )
@property
def __lowerCAmelCase ( self :List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def __lowerCAmelCase ( self :Optional[int] ) -> float:
'''simple docstring'''
return 1E-5
@property
def __lowerCAmelCase ( self :Tuple ) -> int:
'''simple docstring'''
return 12
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :"ProcessorMixin" , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional["TensorType"] = None , lowerCamelCase_ :int = 3 , lowerCamelCase_ :int = 40 , lowerCamelCase_ :int = 40 , ) -> Mapping[str, Any]:
'''simple docstring'''
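        # Toggle OCR on the wrapped image processor so the dummy forward pass
        # uses the words/boxes supplied explicitly below instead of OCR output.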
setattr(processor.image_processor , '''apply_ocr''' , lowerCamelCase_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Dict = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Union[str, Any] = processor.tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Union[str, Any] = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
SCREAMING_SNAKE_CASE : int = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_images(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = dict(
processor(
lowerCamelCase_ , text=lowerCamelCase_ , boxes=lowerCamelCase_ , return_tensors=lowerCamelCase_ , ) )
return inputs
| 18 | 1 |
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self :Optional[int] , *lowerCamelCase_ :Union[str, Any] , **lowerCamelCase_ :str ) -> Optional[int]:
'''simple docstring'''
super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = {}
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :List[Any] , *lowerCamelCase_ :int , **lowerCamelCase_ :Union[str, Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = super().add_tokens(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
if num_added_tokens == 0:
raise ValueError(
f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
''' `placeholder_token` that is not already in the tokenizer.''' )
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :List[Any] , *lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :str=1 , **lowerCamelCase_ :Tuple ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = []
if num_vec_per_token == 1:
self.try_adding_tokens(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
output.append(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Dict = []
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Optional[int] = placeholder_token + f"_{i}"
self.try_adding_tokens(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ )
output.append(lowerCamelCase_ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f"The tokenizer already has placeholder token {token} that can get confused with"
f" {placeholder_token}keep placeholder tokens independent" )
SCREAMING_SNAKE_CASE : str = output
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Tuple=False , lowerCamelCase_ :Optional[int]=1.0 ) -> Any:
'''simple docstring'''
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Optional[int] = []
for i in range(len(lowerCamelCase_ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=lowerCamelCase_ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
SCREAMING_SNAKE_CASE : List[Any] = self.token_map[placeholder_token]
SCREAMING_SNAKE_CASE : List[str] = tokens[: 1 + int(len(lowerCamelCase_ ) * prop_tokens_to_load )]
if vector_shuffle:
SCREAMING_SNAKE_CASE : Any = copy.copy(lowerCamelCase_ )
random.shuffle(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = text.replace(lowerCamelCase_ , ''' '''.join(lowerCamelCase_ ) )
return text
def __call__( self :Any , lowerCamelCase_ :Union[str, Any] , *lowerCamelCase_ :str , lowerCamelCase_ :Optional[int]=False , lowerCamelCase_ :Tuple=1.0 , **lowerCamelCase_ :Tuple ) -> Any:
'''simple docstring'''
return super().__call__(
self.replace_placeholder_tokens_in_text(
lowerCamelCase_ , vector_shuffle=lowerCamelCase_ , prop_tokens_to_load=lowerCamelCase_ ) , *lowerCamelCase_ , **lowerCamelCase_ , )
def __lowerCAmelCase ( self :Tuple , lowerCamelCase_ :str , *lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[Any]=False , lowerCamelCase_ :List[str]=1.0 , **lowerCamelCase_ :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return super().encode(
self.replace_placeholder_tokens_in_text(
lowerCamelCase_ , vector_shuffle=lowerCamelCase_ , prop_tokens_to_load=lowerCamelCase_ ) , *lowerCamelCase_ , **lowerCamelCase_ , )
| 18 |
"""simple docstring"""
import math
def __A ( a_ : list , a_ : int )-> int:
'''simple docstring'''
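    # Jump search on a sorted array: probe ahead in blocks of ~sqrt(n), then
    # scan linearly inside the block that may contain x; -1 means not found.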
SCREAMING_SNAKE_CASE : Optional[int] = len(a_ )
SCREAMING_SNAKE_CASE : Optional[Any] = int(math.floor(math.sqrt(a_ ) ) )
SCREAMING_SNAKE_CASE : List[str] = 0
while arr[min(a_ , a_ ) - 1] < x:
SCREAMING_SNAKE_CASE : Optional[Any] = step
step += int(math.floor(math.sqrt(a_ ) ) )
if prev >= n:
return -1
while arr[prev] < x:
SCREAMING_SNAKE_CASE : Any = prev + 1
if prev == min(a_ , a_ ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
lowerCamelCase__ : Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
lowerCamelCase__ : List[str] = [int(item) for item in user_input.split(",")]
lowerCamelCase__ : Dict = int(input("Enter the number to be searched:\n"))
lowerCamelCase__ : Tuple = jump_search(arr, x)
if res == -1:
print("Number not found!")
else:
print(f'''Number {x} is at index {res}''')
| 18 | 1 |
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCamelCase__ : Any = logging.get_logger(__name__)
def __A ( a_ : List[Any] )-> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = r'''\w+[.]\d+'''
SCREAMING_SNAKE_CASE : Any = re.findall(a_ , a_ )
for pat in pats:
SCREAMING_SNAKE_CASE : Any = key.replace(a_ , '''_'''.join(pat.split('''.''' ) ) )
return key
def __A ( a_ : Dict , a_ : Optional[int] , a_ : Dict )-> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = pt_tuple_key[:-1] + ('''scale''',)
if (
any('''norm''' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
SCREAMING_SNAKE_CASE : List[str] = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
SCREAMING_SNAKE_CASE : str = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
SCREAMING_SNAKE_CASE : Dict = pt_tuple_key[:-1] + ('''embedding''',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
SCREAMING_SNAKE_CASE : int = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
SCREAMING_SNAKE_CASE : Dict = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
SCREAMING_SNAKE_CASE : Union[str, Any] = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight":
SCREAMING_SNAKE_CASE : int = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
SCREAMING_SNAKE_CASE : Optional[int] = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
SCREAMING_SNAKE_CASE : Union[str, Any] = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __A ( a_ : Optional[Any] , a_ : int , a_ : List[Any]=42 )-> int:
'''simple docstring'''
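    # Step 1: Convert the PyTorch state dict tensors to numpy arrays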
SCREAMING_SNAKE_CASE : Union[str, Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
SCREAMING_SNAKE_CASE : Union[str, Any] = flax_model.init_weights(PRNGKey(a_ ) )
SCREAMING_SNAKE_CASE : int = flatten_dict(a_ )
SCREAMING_SNAKE_CASE : Optional[Any] = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
SCREAMING_SNAKE_CASE : List[Any] = rename_key(a_ )
SCREAMING_SNAKE_CASE : List[str] = tuple(renamed_pt_key.split('''.''' ) )
# Correctly rename weight parameters
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = rename_key_and_reshape_tensor(a_ , a_ , a_ )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# also add unexpected weight so that warning is thrown
SCREAMING_SNAKE_CASE : Dict = jnp.asarray(a_ )
return unflatten_dict(a_ )
| 18 |
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
lowerCamelCase__ : List[Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
lowerCamelCase__ : str = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
lowerCamelCase__ : int = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__( datasets.Metric ):
'''simple docstring'''
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :int=None , lowerCamelCase_ :str=1 , lowerCamelCase_ :Union[str, Any]="binary" , lowerCamelCase_ :Dict=None ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = fa_score(
lowerCamelCase_ , lowerCamelCase_ , labels=lowerCamelCase_ , pos_label=lowerCamelCase_ , average=lowerCamelCase_ , sample_weight=lowerCamelCase_ )
return {"f1": float(lowerCamelCase_ ) if score.size == 1 else score}
| 18 | 1 |
"""simple docstring"""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ : Tuple = [
"word_embeddings_layernorm.weight",
"word_embeddings_layernorm.bias",
"input_layernorm.weight",
"input_layernorm.bias",
"post_attention_layernorm.weight",
"post_attention_layernorm.bias",
"self_attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"ln_f.weight",
"ln_f.bias",
]
lowerCamelCase__ : Dict = [
"mlp.dense_4h_to_h.weight",
"self_attention.dense.weight",
]
def __A ( a_ : str , a_ : Tuple )-> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = {
'''word_embeddings.weight''': '''word_embeddings.weight''',
'''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
'''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
'''weight''': '''ln_f.weight''',
'''bias''': '''ln_f.bias''',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
SCREAMING_SNAKE_CASE : Any = int(re.match(r'''.*layer_(\d*).*''' , a_ )[1] )
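    # Transformer-block files are numbered from layer_3 in the Megatron
    # checkpoint, so shift by 3 to get the zero-based index used in "h.{n}."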
layer_number -= 3
return F"h.{layer_number}." + key
def __A ( a_ : Optional[Any] )-> int:
'''simple docstring'''
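    # Returns the size in bytes of one element of `dtype`; bools are bit-packed
    # (1/8 byte), everything else is parsed from the trailing bit-width.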
if dtype == torch.bool:
return 1 / 8
SCREAMING_SNAKE_CASE : Union[str, Any] = re.search(r'''[^\d](\d+)$''' , str(a_ ) )
if bit_search is None:
raise ValueError(F"`dtype` is not a valid dtype: {dtype}." )
SCREAMING_SNAKE_CASE : str = int(bit_search.groups()[0] )
return bit_size // 8
def __A ( a_ : str , a_ : Dict , a_ : Dict , a_ : List[str] , a_ : int )-> Dict:
'''simple docstring'''
if bloom_config_file == "":
SCREAMING_SNAKE_CASE : List[str] = BloomConfig()
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = BloomConfig.from_json_file(a_ )
if shard_model:
SCREAMING_SNAKE_CASE : Union[str, Any] = os.listdir(a_ )
        SCREAMING_SNAKE_CASE : List[Any] = sorted(filter(lambda s : s.startswith('''layer''' ) and "model_00" in s , a_ ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''weight_map''': {}, '''metadata''': {}}
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : List[Any] = BloomConfig()
for j, file in enumerate(a_ ):
print('''Processing file: {}'''.format(a_ ) )
SCREAMING_SNAKE_CASE : List[str] = None
for i in range(a_ ):
# load all TP files
SCREAMING_SNAKE_CASE : Optional[Any] = file.replace('''model_00''' , F"model_0{i}" )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(os.path.join(a_ , a_ ) , map_location='''cpu''' )
# Rename keys in the transformers names
SCREAMING_SNAKE_CASE : Dict = list(temp.keys() )
for key in keys:
SCREAMING_SNAKE_CASE : Any = temp.pop(a_ )
if tensors is None:
SCREAMING_SNAKE_CASE : List[str] = temp
else:
for key in tensors.keys():
if any(key.endswith(a_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
SCREAMING_SNAKE_CASE : int = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
SCREAMING_SNAKE_CASE : Any = torch.cat([tensors[key], temp[key]] , dim=a_ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(a_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
SCREAMING_SNAKE_CASE : Any = tensors[key] / pretraining_tp
torch.save(
a_ , os.path.join(
a_ , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(a_ ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
SCREAMING_SNAKE_CASE : Union[str, Any] = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
SCREAMING_SNAKE_CASE : Any = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ) , str(len(a_ ) ).zfill(5 ) )
SCREAMING_SNAKE_CASE : int = BloomConfig()
SCREAMING_SNAKE_CASE : Optional[Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
SCREAMING_SNAKE_CASE : int = total_size
with open(a_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(a_ , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE : Optional[int] = json.dumps(a_ , indent=2 , sort_keys=a_ ) + '''\n'''
f.write(a_ )
else:
SCREAMING_SNAKE_CASE : List[Any] = BloomModel(a_ )
SCREAMING_SNAKE_CASE : Optional[Any] = os.listdir(a_ )
        SCREAMING_SNAKE_CASE : List[Any] = sorted(filter(lambda s : s.startswith('''layer''' ) and "model_00" in s , a_ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = None
for i, file in enumerate(a_ ):
SCREAMING_SNAKE_CASE : List[str] = None
for i in range(a_ ):
# load all TP files
SCREAMING_SNAKE_CASE : str = file.replace('''model_00''' , F"model_0{i}" )
SCREAMING_SNAKE_CASE : List[Any] = torch.load(os.path.join(a_ , a_ ) , map_location='''cpu''' )
# Rename keys in the transformers names
SCREAMING_SNAKE_CASE : Dict = list(temp.keys() )
for key in keys:
SCREAMING_SNAKE_CASE : int = temp.pop(a_ )
if tensors is None:
SCREAMING_SNAKE_CASE : int = temp
else:
for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(a_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
SCREAMING_SNAKE_CASE : Optional[int] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
SCREAMING_SNAKE_CASE : List[Any] = torch.cat([tensors[key], temp[key]] , dim=a_ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(a_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
SCREAMING_SNAKE_CASE : Any = tensors[key] / pretraining_tp
SCREAMING_SNAKE_CASE : Dict = model.load_state_dict(a_ , strict=a_ )
assert not other_keys.unexpected_keys, F"The keys {other_keys.unexpected_keys} are unexpected"
if missing_keys is None:
SCREAMING_SNAKE_CASE : List[str] = set(other_keys.missing_keys )
else:
SCREAMING_SNAKE_CASE : List[Any] = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, F"The keys {missing_keys} are missing"
# Save pytorch-model
os.makedirs(a_ , exist_ok=a_ )
SCREAMING_SNAKE_CASE : int = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
SCREAMING_SNAKE_CASE : List[str] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(F"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}" )
if config.torch_dtype is not None:
SCREAMING_SNAKE_CASE : Dict = model.to(config.torch_dtype )
torch.save(model.state_dict() , a_ )
print(F"Save configuration file to {pytorch_config_dump_path}" )
with open(a_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCamelCase__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bloom_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the Megatron-LM checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--bloom_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--shard_model",
action="store_true",
help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
)
parser.add_argument(
"--pretraining_tp",
default=4,
type=int,
help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
)
lowerCamelCase__ : str = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 18 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def __A ( a_ : int , a_ : int )-> bool:
'''simple docstring'''
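    # A fraction such as 49/98 is "digit-cancelling": striking the shared digit
    # (the 9) leaves 4/8, which has the same value as the original fraction.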
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def __A ( a_ : int )-> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : List[str] = 11
SCREAMING_SNAKE_CASE : Union[str, Any] = int('''1''' + '''0''' * digit_len )
for num in range(a_ , a_ ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(a_ , a_ ):
solutions.append(F"{num}/{den}" )
den += 1
num += 1
SCREAMING_SNAKE_CASE : Optional[Any] = 10
return solutions
def __A ( a_ : int = 2 )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 1.0
for fraction in fraction_list(a_ ):
SCREAMING_SNAKE_CASE : List[str] = Fraction(a_ )
result *= frac.denominator / frac.numerator
return int(a_ )
if __name__ == "__main__":
print(solution())
| 18 | 1 |
"""simple docstring"""
def __A ( a_ : int )-> int:
'''simple docstring'''
if n == 1 or not isinstance(a_ , a_ ):
return 0
elif n == 2:
return 1
else:
SCREAMING_SNAKE_CASE : Optional[Any] = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def __A ( a_ : int )-> int:
'''simple docstring'''
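    # Find the index of the first Fibonacci number with at least n digits.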
SCREAMING_SNAKE_CASE : Any = 0
SCREAMING_SNAKE_CASE : List[Any] = 2
while digits < n:
index += 1
SCREAMING_SNAKE_CASE : Optional[Any] = len(str(fibonacci(a_ ) ) )
return index
def __A ( a_ : int = 10_00 )-> int:
'''simple docstring'''
return fibonacci_digits_index(a_ )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 18 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ : int = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase , _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """maskformer-swin"""
UpperCamelCase = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self :Optional[int] , lowerCamelCase_ :List[Any]=2_24 , lowerCamelCase_ :Tuple=4 , lowerCamelCase_ :Optional[Any]=3 , lowerCamelCase_ :List[str]=96 , lowerCamelCase_ :int=[2, 2, 6, 2] , lowerCamelCase_ :Union[str, Any]=[3, 6, 12, 24] , lowerCamelCase_ :Optional[int]=7 , lowerCamelCase_ :Tuple=4.0 , lowerCamelCase_ :Tuple=True , lowerCamelCase_ :Dict=0.0 , lowerCamelCase_ :Any=0.0 , lowerCamelCase_ :List[Any]=0.1 , lowerCamelCase_ :Dict="gelu" , lowerCamelCase_ :Optional[int]=False , lowerCamelCase_ :List[str]=0.0_2 , lowerCamelCase_ :Any=1E-5 , lowerCamelCase_ :Union[str, Any]=None , lowerCamelCase_ :List[str]=None , **lowerCamelCase_ :Union[str, Any] , ) -> Dict:
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE : str = num_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = embed_dim
SCREAMING_SNAKE_CASE : List[Any] = depths
SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = num_heads
SCREAMING_SNAKE_CASE : Any = window_size
SCREAMING_SNAKE_CASE : List[str] = mlp_ratio
SCREAMING_SNAKE_CASE : str = qkv_bias
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : int = drop_path_rate
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : Any = use_absolute_embeddings
SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[str] = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE : int = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) )
SCREAMING_SNAKE_CASE : Dict = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(lowerCamelCase_ ) + 1 )]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = get_aligned_output_features_output_indices(
out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names )
| 18 | 1 |
"""simple docstring"""
import torch
def __A ( )-> Optional[Any]:
'''simple docstring'''
if torch.cuda.is_available():
SCREAMING_SNAKE_CASE : Dict = torch.cuda.device_count()
else:
SCREAMING_SNAKE_CASE : Optional[Any] = 0
print(F"Successfully ran on {num_gpus} GPUs" )
if __name__ == "__main__":
main()
| 18 |
"""simple docstring"""
import math
class lowercase__:
'''simple docstring'''
def __init__( self :Union[str, Any] , lowerCamelCase_ :List[str]=0 ) -> List[Any]: # a graph with Node 0,1,...,N-1
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = n
SCREAMING_SNAKE_CASE : List[Any] = [
[math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ )
] # adjacency matrix for weight
SCREAMING_SNAKE_CASE : Any = [
[math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ )
] # dp[i][j] stores minimum distance from i to j
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = w
def __lowerCAmelCase ( self :str ) -> Union[str, Any]:
'''simple docstring'''
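        # Floyd-Warshall relaxation: allow each vertex k as an intermediate hop
        # and keep the cheaper of the current path and the path through k.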
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
SCREAMING_SNAKE_CASE : List[str] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def __lowerCAmelCase ( self :int , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return self.dp[u][v]
if __name__ == "__main__":
lowerCamelCase__ : Dict = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 18 | 1 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def __A ( a_ : Optional[int] )-> List[str]:
'''simple docstring'''
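    # Decorator that times a single call of `func` with timeit's wall clock and
    # returns the elapsed seconds (the wrapped function's result is discarded).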
def wrapper(*a_ : List[str] , **a_ : Dict ):
SCREAMING_SNAKE_CASE : int = timeit.default_timer()
SCREAMING_SNAKE_CASE : Any = func(*a_ , **a_ )
SCREAMING_SNAKE_CASE : str = timeit.default_timer() - starttime
return delta
SCREAMING_SNAKE_CASE : int = func.__name__
return wrapper
def __A ( a_ : dict , a_ : str=1_00 , a_ : Optional[int]=None )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : Optional[int] = seq_shapes or {}
for i in range(a_ ):
SCREAMING_SNAKE_CASE : List[Any] = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(a_ , _ArrayXD ):
SCREAMING_SNAKE_CASE : Optional[Any] = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(a_ , datasets.Value ):
if v.dtype == "string":
SCREAMING_SNAKE_CASE : str = '''The small grey turtle was surprisingly fast when challenged.'''
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(a_ , datasets.Sequence ):
while isinstance(a_ , datasets.Sequence ):
SCREAMING_SNAKE_CASE : Optional[int] = v.feature
SCREAMING_SNAKE_CASE : List[Any] = seq_shapes[k]
SCREAMING_SNAKE_CASE : Any = np.random.rand(*a_ ).astype(v.dtype )
SCREAMING_SNAKE_CASE : Tuple = data
dummy_data.append((i, example) )
return dummy_data
def __A ( a_ : List[Any] , a_ : Dict , a_ : List[str]=1_00 , a_ : Union[str, Any]=None )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = generate_examples(a_ , num_examples=a_ , seq_shapes=a_ )
with ArrowWriter(features=a_ , path=a_ ) as writer:
for key, record in dummy_data:
SCREAMING_SNAKE_CASE : int = features.encode_example(a_ )
writer.write(a_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
F"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
SCREAMING_SNAKE_CASE : Union[str, Any] = datasets.Dataset.from_file(filename=a_ , info=datasets.DatasetInfo(features=a_ ) )
return dataset
| 18 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ : Tuple = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Union[str, Any] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowerCamelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18 | 1 |
"""simple docstring"""
from importlib import import_module
from .logging import get_logger
lowerCamelCase__ : int = get_logger(__name__)
class lowercase__:
'''simple docstring'''
def __init__( self :int , lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[Any]=None ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = attrs or []
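        # Mirror every non-dunder attribute of the wrapped module (plus any
        # name explicitly listed in `attrs`) onto this proxy object.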
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('''__''' ):
setattr(self , lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = module._original_module if isinstance(lowerCamelCase_ , _PatchedModuleObj ) else module
class lowercase__:
'''simple docstring'''
UpperCamelCase = []
def __init__( self :str , lowerCamelCase_ :List[Any] , lowerCamelCase_ :str , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :str=None ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = obj
SCREAMING_SNAKE_CASE : Optional[Any] = target
SCREAMING_SNAKE_CASE : List[str] = new
SCREAMING_SNAKE_CASE : str = target.split('''.''' )[0]
SCREAMING_SNAKE_CASE : Optional[int] = {}
SCREAMING_SNAKE_CASE : Any = attrs or []
def __enter__( self :str ) -> Tuple:
'''simple docstring'''
*SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.target.split('''.''' )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(lowerCamelCase_ ) ):
try:
SCREAMING_SNAKE_CASE : Any = import_module('''.'''.join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
SCREAMING_SNAKE_CASE : List[str] = getattr(self.obj , lowerCamelCase_ )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(lowerCamelCase_ , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
SCREAMING_SNAKE_CASE : List[Any] = obj_attr
# patch at top level
setattr(self.obj , lowerCamelCase_ , _PatchedModuleObj(lowerCamelCase_ , attrs=self.attrs ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(self.obj , lowerCamelCase_ )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(lowerCamelCase_ , lowerCamelCase_ , _PatchedModuleObj(getattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) , attrs=self.attrs ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(lowerCamelCase_ , lowerCamelCase_ )
# finally set the target attribute
setattr(lowerCamelCase_ , lowerCamelCase_ , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
SCREAMING_SNAKE_CASE : Any = getattr(import_module('''.'''.join(lowerCamelCase_ ) ) , lowerCamelCase_ )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , lowerCamelCase_ ) is attr_value:
SCREAMING_SNAKE_CASE : Any = getattr(self.obj , lowerCamelCase_ )
setattr(self.obj , lowerCamelCase_ , self.new )
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
SCREAMING_SNAKE_CASE : Union[str, Any] = globals()['''__builtins__'''][target_attr]
setattr(self.obj , lowerCamelCase_ , self.new )
else:
raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule." )
def __exit__( self :Union[str, Any] , *lowerCamelCase_ :Optional[Any] ) -> List[Any]:
'''simple docstring'''
for attr in list(self.original ):
setattr(self.obj , lowerCamelCase_ , self.original.pop(lowerCamelCase_ ) )
def __lowerCAmelCase ( self :List[Any] ) -> Dict:
'''simple docstring'''
self.__enter__()
self._active_patches.append(self )
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
| 18 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self :List[str] , lowerCamelCase_ :Union[List[ControlNetModel], Tuple[ControlNetModel]] ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[Any] = nn.ModuleList(lowerCamelCase_ )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :Union[torch.Tensor, float, int] , lowerCamelCase_ :torch.Tensor , lowerCamelCase_ :List[torch.tensor] , lowerCamelCase_ :List[float] , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[torch.Tensor] = None , lowerCamelCase_ :Optional[Dict[str, Any]] = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :bool = True , ) -> Union[ControlNetOutput, Tuple]:
'''simple docstring'''
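        # Run every ControlNet on its own conditioning image and scale, then
        # sum the per-block residuals so all controls steer the same UNet pass.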
for i, (image, scale, controlnet) in enumerate(zip(lowerCamelCase_ , lowerCamelCase_ , self.nets ) ):
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = controlnet(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
# merge samples
if i == 0:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = down_samples, mid_sample
else:
SCREAMING_SNAKE_CASE : Optional[int] = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowerCamelCase_ , lowerCamelCase_ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Union[str, os.PathLike] , lowerCamelCase_ :bool = True , lowerCamelCase_ :Callable = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional[str] = None , ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : Any = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowerCamelCase_ , is_main_process=lowerCamelCase_ , save_function=lowerCamelCase_ , safe_serialization=lowerCamelCase_ , variant=lowerCamelCase_ , )
idx += 1
SCREAMING_SNAKE_CASE : Union[str, Any] = model_path_to_save + f"_{idx}"
@classmethod
def __lowerCAmelCase ( cls :Dict , lowerCamelCase_ :Optional[Union[str, os.PathLike]] , **lowerCamelCase_ :Tuple ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : Optional[int] = []
# load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
SCREAMING_SNAKE_CASE : Dict = pretrained_model_path
while os.path.isdir(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Tuple = ControlNetModel.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
controlnets.append(lowerCamelCase_ )
idx += 1
SCREAMING_SNAKE_CASE : Union[str, Any] = pretrained_model_path + f"_{idx}"
logger.info(f"{len(lowerCamelCase_ )} controlnets loaded from {pretrained_model_path}." )
if len(lowerCamelCase_ ) == 0:
raise ValueError(
f"No ControlNets found under {os.path.dirname(lowerCamelCase_ )}. Expected at least {pretrained_model_path + '_0'}." )
return cls(lowerCamelCase_ )
| 18 | 1 |
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase__:
'''simple docstring'''
def __init__( self :Any , lowerCamelCase_ :str , lowerCamelCase_ :Dict=13 , lowerCamelCase_ :Dict=7 , lowerCamelCase_ :Any=True , lowerCamelCase_ :List[str]=True , lowerCamelCase_ :Any=False , lowerCamelCase_ :List[str]=True , lowerCamelCase_ :Optional[Any]=99 , lowerCamelCase_ :List[str]=32 , lowerCamelCase_ :Union[str, Any]=5 , lowerCamelCase_ :Any=4 , lowerCamelCase_ :Dict=37 , lowerCamelCase_ :List[str]="gelu" , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Tuple=5_12 , lowerCamelCase_ :str=16 , lowerCamelCase_ :Dict=2 , lowerCamelCase_ :Tuple=0.0_2 , lowerCamelCase_ :Optional[int]=3 , lowerCamelCase_ :Any=4 , lowerCamelCase_ :Union[str, Any]=None , ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : List[str] = seq_length
SCREAMING_SNAKE_CASE : Optional[Any] = is_training
SCREAMING_SNAKE_CASE : Optional[Any] = use_input_mask
SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE : int = hidden_size
SCREAMING_SNAKE_CASE : str = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Dict = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : int = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = type_vocab_size
SCREAMING_SNAKE_CASE : Any = type_sequence_label_size
SCREAMING_SNAKE_CASE : int = initializer_range
SCREAMING_SNAKE_CASE : str = num_labels
SCREAMING_SNAKE_CASE : List[str] = num_choices
SCREAMING_SNAKE_CASE : Optional[Any] = scope
def __lowerCAmelCase ( self :List[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Any = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self :str , lowerCamelCase_ :int , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :str , lowerCamelCase_ :str , lowerCamelCase_ :Dict , lowerCamelCase_ :int ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = BioGptModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int] , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = BioGptForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Any , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Union[str, Any] , *lowerCamelCase_ :Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = BioGptModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# create attention mask
SCREAMING_SNAKE_CASE : Dict = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = self.seq_length // 2
SCREAMING_SNAKE_CASE : str = 0
# first forward pass
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ ).to_tuple()
        # create hypothetical next token and extend to next_input_ids
SCREAMING_SNAKE_CASE : int = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
SCREAMING_SNAKE_CASE : List[str] = ids_tensor((1,) , lowerCamelCase_ ).item() + 1
SCREAMING_SNAKE_CASE : int = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
SCREAMING_SNAKE_CASE : int = random_other_next_tokens
# append to next input_ids and attn_mask
SCREAMING_SNAKE_CASE : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE : List[Any] = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowerCamelCase_ )] , dim=1 , )
# get two different outputs
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )['''last_hidden_state''']
SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ , past_key_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ )['''last_hidden_state''']
# select random slice
SCREAMING_SNAKE_CASE : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE : Optional[int] = output_from_no_past[:, -1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE : List[Any] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :str , *lowerCamelCase_ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = BioGptModel(config=lowerCamelCase_ ).to(lowerCamelCase_ ).eval()
SCREAMING_SNAKE_CASE : str = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCamelCase_ )
# first forward pass
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , use_cache=lowerCamelCase_ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = outputs.to_tuple()
        # create multiple hypothetical next tokens and extend to next_input_ids
SCREAMING_SNAKE_CASE : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE : int = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and attention_mask
SCREAMING_SNAKE_CASE : int = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE : Tuple = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )['''last_hidden_state''']
SCREAMING_SNAKE_CASE : str = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ )[
'''last_hidden_state'''
]
# select random slice
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
def __lowerCAmelCase ( self :Union[str, Any] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[Any] , *lowerCamelCase_ :Tuple , lowerCamelCase_ :List[Any]=False ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = BioGptForCausalLM(lowerCamelCase_ )
model.to(lowerCamelCase_ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :int , *lowerCamelCase_ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = BioGptModel(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
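        # GPT-2-style scaled initialization: residual projections are expected
        # to use std = initializer_range / sqrt(2 * num_hidden_layers).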
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 )
def __lowerCAmelCase ( self :str , lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :List[str] , *lowerCamelCase_ :Tuple ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.num_labels
SCREAMING_SNAKE_CASE : Any = BioGptForTokenClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
),
) : List[Any] = config_and_inputs
SCREAMING_SNAKE_CASE : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
UpperCamelCase = (BioGptForCausalLM,) if is_torch_available() else ()
UpperCamelCase = (
{
"""feature-extraction""": BioGptModel,
"""text-classification""": BioGptForSequenceClassification,
"""text-generation""": BioGptForCausalLM,
"""token-classification""": BioGptForTokenClassification,
"""zero-shot""": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase = False
def __lowerCAmelCase ( self :str ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = BioGptModelTester(self )
SCREAMING_SNAKE_CASE : Union[str, Any] = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def __lowerCAmelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self :Dict ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Tuple ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE : str = type
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowerCamelCase_ , gradient_checkpointing=lowerCamelCase_ )
def __lowerCAmelCase ( self :int ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowerCamelCase_ )
def __lowerCAmelCase ( self :Any ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowerCamelCase_ )
@slow
def __lowerCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
SCREAMING_SNAKE_CASE : Any = '''left'''
# Reuse the EOS token as the PAD token for batched generation
SCREAMING_SNAKE_CASE : Any = tokenizer.eos_token
SCREAMING_SNAKE_CASE : List[str] = model.config.eos_token_id
# use different length sentences to test batching
SCREAMING_SNAKE_CASE : int = [
'''Hello, my dog is a little''',
'''Today, I''',
]
SCREAMING_SNAKE_CASE : Tuple = tokenizer(lowerCamelCase_ , return_tensors='''pt''' , padding=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = inputs['''input_ids'''].to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = model.generate(
input_ids=lowerCamelCase_ , attention_mask=inputs['''attention_mask'''].to(lowerCamelCase_ ) , )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = model.generate(input_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
SCREAMING_SNAKE_CASE : Any = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model.generate(input_ids=lowerCamelCase_ , max_length=model.config.max_length - num_paddings )
SCREAMING_SNAKE_CASE : Any = tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , [non_padded_sentence, padded_sentence] )
@slow
def __lowerCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : List[Any] = BioGptModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Union[str, Any] = 3
SCREAMING_SNAKE_CASE : Optional[int] = input_dict['''input_ids''']
SCREAMING_SNAKE_CASE : str = input_ids.ne(1 ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE : str = BioGptForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCAmelCase ( self :List[str] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Dict = 3
SCREAMING_SNAKE_CASE : int = '''multi_label_classification'''
SCREAMING_SNAKE_CASE : Any = input_dict['''input_ids''']
SCREAMING_SNAKE_CASE : int = input_ids.ne(1 ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE : Any = BioGptForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class lowercase__( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self :Tuple ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[2, 48_05, 9, 6_56, 21]] )
SCREAMING_SNAKE_CASE : str = model(lowerCamelCase_ )[0]
SCREAMING_SNAKE_CASE : str = 4_23_84
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase_ , atol=1E-4 ) )
@slow
def __lowerCAmelCase ( self :str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
SCREAMING_SNAKE_CASE : str = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(lowerCamelCase_ )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = model.generate(
**lowerCamelCase_ , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : str = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
| 18 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def __A ( a_ : float , a_ : float , a_ : bool = False )-> list[float]:
'''simple docstring'''
if radian_mode:
return [magnitude * cos(a_ ), magnitude * sin(a_ )]
return [magnitude * cos(radians(a_ ) ), magnitude * sin(radians(a_ ) )]
def __A ( a_ : NDArray[floataa] , a_ : NDArray[floataa] , a_ : float = 10**-1 )-> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE : NDArray[floataa] = cross(a_ , a_ )
SCREAMING_SNAKE_CASE : float = sum(a_ )
return abs(a_ ) < eps
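# Hedged illustrative sketch (names and values below are assumptions, not part
# of the original problems): the check above sums the z-components of
# location x force, i.e. the net moment about the origin, so a single
# off-axis force can never pass it.
def _demo_net_moment_is_nonzero() -> bool:
    demo_forces = array([[0.0, -9.81]])  # one downward force of ~9.81 N
    demo_locations = array([[2.0, 0.0]])  # applied 2 m from the pivot
    net_moment = sum(cross(demo_locations, demo_forces))  # -> -19.62
    return abs(net_moment) > 10**-1  # True: the system is not balanced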
if __name__ == "__main__":
# Test to check if it works
lowerCamelCase__ : Optional[Any] = array(
[
polar_force(7_1_8.4, 180 - 30),
polar_force(8_7_9.5_4, 45),
polar_force(100, -90),
]
)
lowerCamelCase__ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
lowerCamelCase__ : Union[str, Any] = array(
[
polar_force(30 * 9.8_1, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
lowerCamelCase__ : Any = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
lowerCamelCase__ : Union[str, Any] = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
lowerCamelCase__ : Optional[int] = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 18 | 1 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class lowercase__( unittest.TestCase ):
'''simple docstring'''
def __init__( self :List[Any] , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any]=7 , lowerCamelCase_ :str=3 , lowerCamelCase_ :Optional[Any]=30 , lowerCamelCase_ :Dict=4_00 , lowerCamelCase_ :Any=True , lowerCamelCase_ :List[str]=None , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :List[Any]=[0.5, 0.5, 0.5] , lowerCamelCase_ :List[str]=[0.5, 0.5, 0.5] , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :Tuple=1 / 2_55 , lowerCamelCase_ :Any=True , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
SCREAMING_SNAKE_CASE : Tuple = parent
SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE : str = num_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = min_resolution
SCREAMING_SNAKE_CASE : List[Any] = max_resolution
SCREAMING_SNAKE_CASE : List[Any] = do_resize
SCREAMING_SNAKE_CASE : List[Any] = size
SCREAMING_SNAKE_CASE : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE : Dict = image_mean
SCREAMING_SNAKE_CASE : List[str] = image_std
SCREAMING_SNAKE_CASE : Optional[Any] = do_rescale
SCREAMING_SNAKE_CASE : Optional[Any] = rescale_factor
SCREAMING_SNAKE_CASE : Optional[int] = do_pad
def __lowerCAmelCase ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Union[str, Any]=False ) -> Tuple:
'''simple docstring'''
if not batched:
SCREAMING_SNAKE_CASE : List[Any] = image_inputs[0]
if isinstance(lowerCamelCase_ , Image.Image ):
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = image.size
else:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE : str = int(self.size['''shortest_edge'''] * h / w )
SCREAMING_SNAKE_CASE : Optional[int] = self.size['''shortest_edge''']
elif w > h:
SCREAMING_SNAKE_CASE : Tuple = self.size['''shortest_edge''']
SCREAMING_SNAKE_CASE : Union[str, Any] = int(self.size['''shortest_edge'''] * w / h )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = self.size['''shortest_edge''']
SCREAMING_SNAKE_CASE : Union[str, Any] = self.size['''shortest_edge''']
else:
SCREAMING_SNAKE_CASE : Dict = []
for image in image_inputs:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE : Union[str, Any] = max(lowerCamelCase_ , key=lambda lowerCamelCase_ : item[0] )[0]
SCREAMING_SNAKE_CASE : List[str] = max(lowerCamelCase_ , key=lambda lowerCamelCase_ : item[1] )[1]
return expected_height, expected_width
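# Hedged worked example (illustrative, mirroring the shortest-edge logic above):
# the smaller side is scaled to `shortest_edge` and the other side follows the
# aspect ratio, so a 30 x 400 (h x w) image with shortest_edge=18 -> 18 x 240.
def _demo_shortest_edge_resize(h: int = 30, w: int = 400, shortest_edge: int = 18) -> tuple[int, int]:
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)  # -> (18, 240)
    return shortest_edge, shortest_edge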
@require_torch
@require_vision
class lowercase__( _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = ConditionalDetrImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self :List[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = ConditionalDetrImageProcessingTester(self )
@property
def __lowerCAmelCase ( self :Dict ) -> Union[str, Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self :Tuple ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase_ , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCamelCase_ , '''image_std''' ) )
self.assertTrue(hasattr(lowerCamelCase_ , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCamelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase_ , '''size''' ) )
def __lowerCAmelCase ( self :List[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCamelCase_ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[Any] ) -> Any:
'''simple docstring'''
pass
def __lowerCAmelCase ( self :Union[str, Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.get_expected_values(lowerCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = self.image_processor_tester.get_expected_values(lowerCamelCase_ , batched=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = image_processing(lowerCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCAmelCase ( self :Optional[int] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.image_processor_tester.get_expected_values(lowerCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE : int = image_processing(lowerCamelCase_ , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = self.image_processor_tester.get_expected_values(lowerCamelCase_ , batched=lowerCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCAmelCase ( self :str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor_tester.get_expected_values(lowerCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE : str = image_processing(lowerCamelCase_ , return_tensors='''pt''' ).pixel_values
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor_tester.get_expected_values(lowerCamelCase_ , batched=lowerCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCAmelCase ( self :Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
SCREAMING_SNAKE_CASE : Optional[int] = json.loads(f.read() )
SCREAMING_SNAKE_CASE : int = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
SCREAMING_SNAKE_CASE : Any = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
SCREAMING_SNAKE_CASE : List[str] = image_processing(images=lowerCamelCase_ , annotations=lowerCamelCase_ , return_tensors='''pt''' )
# verify pixel values
SCREAMING_SNAKE_CASE : List[Any] = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , lowerCamelCase_ , atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE : Dict = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , lowerCamelCase_ ) )
# verify boxes
SCREAMING_SNAKE_CASE : List[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , lowerCamelCase_ , atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : Dict = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , lowerCamelCase_ ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , lowerCamelCase_ ) )
# verify class_labels
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , lowerCamelCase_ ) )
# verify orig_size
SCREAMING_SNAKE_CASE : int = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , lowerCamelCase_ ) )
# verify size
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , lowerCamelCase_ ) )
@slow
def __lowerCAmelCase ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
SCREAMING_SNAKE_CASE : List[Any] = json.loads(f.read() )
SCREAMING_SNAKE_CASE : List[str] = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
SCREAMING_SNAKE_CASE : List[str] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
SCREAMING_SNAKE_CASE : int = ConditionalDetrImageProcessor(format='''coco_panoptic''' )
SCREAMING_SNAKE_CASE : str = image_processing(images=lowerCamelCase_ , annotations=lowerCamelCase_ , masks_path=lowerCamelCase_ , return_tensors='''pt''' )
# verify pixel values
SCREAMING_SNAKE_CASE : Tuple = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , lowerCamelCase_ , atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , lowerCamelCase_ ) )
# verify boxes
SCREAMING_SNAKE_CASE : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , lowerCamelCase_ , atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , lowerCamelCase_ ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , lowerCamelCase_ ) )
# verify class_labels
SCREAMING_SNAKE_CASE : Dict = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , lowerCamelCase_ ) )
# verify masks
SCREAMING_SNAKE_CASE : Union[str, Any] = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , lowerCamelCase_ )
# verify orig_size
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , lowerCamelCase_ ) )
# verify size
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , lowerCamelCase_ ) )
| 18 |
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. A bigger population can be faster but uses more memory.
lowerCamelCase__ : Optional[Any] = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCamelCase__ : Optional[int] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase__ : Optional[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def __A ( a_ : str , a_ : str )-> tuple[str, float]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = len([g for position, g in enumerate(a_ ) if g == main_target[position]] )
return (item, float(a_ ))
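# Hedged worked example (illustrative names, not part of the original
# algorithm): fitness counts the positions where a candidate already matches
# the target, so target "cat" vs candidate "car" scores 2.0.
def _demo_fitness(candidate: str = "car", target: str = "cat") -> float:
    return float(len([g for position, g in enumerate(candidate) if g == target[position]]))  # -> 2.0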
def __A ( a_ : str , a_ : str )-> tuple[str, str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = random.randint(0 , len(a_ ) - 1 )
SCREAMING_SNAKE_CASE : str = parent_a[:random_slice] + parent_a[random_slice:]
SCREAMING_SNAKE_CASE : Dict = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
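# Hedged illustrative sketch (the deterministic slice index is an assumption;
# the real function draws it at random): single-point crossover keeps one
# parent's prefix and the other parent's suffix.
def _demo_single_point_crossover() -> tuple[str, str]:
    parent_one, parent_two = "ABCDEF", "uvwxyz"
    slice_index = 3  # pretend random.randint(0, 5) returned 3
    child_one = parent_one[:slice_index] + parent_two[slice_index:]  # "ABCxyz"
    child_two = parent_two[:slice_index] + parent_one[slice_index:]  # "uvwDEF"
    return (child_one, child_two)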
def __A ( a_ : str , a_ : list[str] )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = list(a_ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
SCREAMING_SNAKE_CASE : Any = random.choice(a_ )
return "".join(a_ )
def __A ( a_ : tuple[str, float] , a_ : list[tuple[str, float]] , a_ : list[str] , )-> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = []
# Generate more children proportionally to the fitness score.
SCREAMING_SNAKE_CASE : List[str] = int(parent_a[1] * 1_00 ) + 1
SCREAMING_SNAKE_CASE : Optional[Any] = 10 if child_n >= 10 else child_n
for _ in range(a_ ):
SCREAMING_SNAKE_CASE : List[str] = population_score[random.randint(0 , a_ )][0]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = crossover(parent_a[0] , a_ )
# Append new string to the population list.
pop.append(mutate(a_ , a_ ) )
pop.append(mutate(a_ , a_ ) )
return pop
def __A ( a_ : str , a_ : list[str] , a_ : bool = True )-> tuple[int, int, str]:
'''simple docstring'''
if N_POPULATION < N_SELECTED:
SCREAMING_SNAKE_CASE : List[Any] = F"{N_POPULATION} must be bigger than {N_SELECTED}"
raise ValueError(a_ )
# Verify that the target contains no genes besides the ones inside the genes variable.
SCREAMING_SNAKE_CASE : List[str] = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
SCREAMING_SNAKE_CASE : str = F"{not_in_genes_list} is not in genes list, evolution cannot converge"
raise ValueError(a_ )
# Generate random starting population.
SCREAMING_SNAKE_CASE : Tuple = []
for _ in range(a_ ):
population.append(''''''.join([random.choice(a_ ) for i in range(len(a_ ) )] ) )
# Just some logs to know what the algorithm is doing.
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(a_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
SCREAMING_SNAKE_CASE : int = [evaluate(a_ , a_ ) for item in population]
# Check if there is a matching evolution.
SCREAMING_SNAKE_CASE : List[Any] = sorted(a_ , key=lambda a_ : a_[1] , reverse=a_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"\nGeneration: {generation}"
F"\nTotal Population:{total_population}"
F"\nBest score: {population_score[0][1]}"
F"\nBest string: {population_score[0][0]}" )
# Flush the old population, keeping some of the best evolutions.
# Keeping the best of them avoids regressions in the evolution.
SCREAMING_SNAKE_CASE : Optional[Any] = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(a_ )
# Normalize population score to be between 0 and 1.
SCREAMING_SNAKE_CASE : Optional[int] = [
(item, score / len(a_ )) for item, score in population_score
]
# This is the selection step
for i in range(a_ ):
population.extend(select(population_score[int(a_ )] , a_ , a_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# far fewer generations.
if len(a_ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCamelCase__ : Dict = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCamelCase__ : int = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 18 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCamelCase__ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase__ : Optional[int] = "\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)[\"depth\"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline(\"depth-estimation\")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to(\"cuda\")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to(\"cuda\")\n\n\n >>> img = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/cat.png\"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")\n\n >>> prompt = \"A robot, 4k photo\"\n >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"\n\n >>> generator = torch.Generator(device=\"cuda\").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save(\"robot_cat.png\")\n ```\n"
def __A ( a_ : Any , a_ : Optional[Any] , a_ : List[str]=8 )-> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
SCREAMING_SNAKE_CASE : Any = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
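# Hedged worked example (illustrative values): with scale_factor=8, a 768-pixel
# side gives 768 // 8**2 = 12 and maps to 12 * 8 = 96 latent pixels, while a
# non-divisible 700 rounds up to 11 * 8 = 88 so no image content is cropped.
def _demo_downscale(side: int = 700, scale_factor: int = 8) -> int:
    new_side = side // scale_factor**2 + (side % scale_factor**2 != 0)
    return new_side * scale_factor  # 768 -> 96, 700 -> 88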
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self :List[str] , lowerCamelCase_ :UNetaDConditionModel , lowerCamelCase_ :DDPMScheduler , lowerCamelCase_ :VQModel , ) -> str:
'''simple docstring'''
super().__init__()
self.register_modules(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , movq=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any , lowerCamelCase_ :str , lowerCamelCase_ :int , lowerCamelCase_ :Tuple , lowerCamelCase_ :int ) -> List[Any]:
'''simple docstring'''
if latents is None:
SCREAMING_SNAKE_CASE : List[Any] = randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ , device=lowerCamelCase_ , dtype=lowerCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
SCREAMING_SNAKE_CASE : int = latents.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = latents * scheduler.init_noise_sigma
return latents
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Any=0 ) -> List[Any]:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
SCREAMING_SNAKE_CASE : List[str] = torch.device(f"cuda:{gpu_id}" )
SCREAMING_SNAKE_CASE : int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCamelCase_ , lowerCamelCase_ )
def __lowerCAmelCase ( self :str , lowerCamelCase_ :Union[str, Any]=0 ) -> Optional[int]:
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
SCREAMING_SNAKE_CASE : Optional[int] = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=lowerCamelCase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
SCREAMING_SNAKE_CASE : str = None
for cpu_offloaded_model in [self.unet, self.movq]:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = cpu_offload_with_hook(lowerCamelCase_ , lowerCamelCase_ , prev_module_hook=lowerCamelCase_ )
# We'll offload the last model manually.
SCREAMING_SNAKE_CASE : int = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowerCAmelCase ( self :int ) -> Optional[int]:
'''simple docstring'''
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCamelCase_ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowerCamelCase_ )
def __call__( self :str , lowerCamelCase_ :Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCamelCase_ :Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 5_12 , lowerCamelCase_ :int = 1_00 , lowerCamelCase_ :float = 4.0 , lowerCamelCase_ :int = 1 , lowerCamelCase_ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase_ :Optional[torch.FloatTensor] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self._execution_device
SCREAMING_SNAKE_CASE : List[Any] = guidance_scale > 1.0
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = torch.cat(lowerCamelCase_ , dim=0 )
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat(lowerCamelCase_ , dim=0 )
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat(lowerCamelCase_ , dim=0 )
SCREAMING_SNAKE_CASE : List[Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE : Tuple = image_embeds.repeat_interleave(lowerCamelCase_ , dim=0 )
SCREAMING_SNAKE_CASE : Optional[Any] = negative_image_embeds.repeat_interleave(lowerCamelCase_ , dim=0 )
SCREAMING_SNAKE_CASE : Tuple = hint.repeat_interleave(lowerCamelCase_ , dim=0 )
SCREAMING_SNAKE_CASE : Any = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCamelCase_ )
self.scheduler.set_timesteps(lowerCamelCase_ , device=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.scheduler.timesteps
SCREAMING_SNAKE_CASE : List[str] = self.movq.config.latent_channels
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = downscale_height_and_width(lowerCamelCase_ , lowerCamelCase_ , self.movq_scale_factor )
# create initial latent
SCREAMING_SNAKE_CASE : int = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowerCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE : Any = {'''image_embeds''': image_embeds, '''hint''': hint}
SCREAMING_SNAKE_CASE : Dict = self.unet(
sample=lowerCamelCase_ , timestep=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , added_cond_kwargs=lowerCamelCase_ , return_dict=lowerCamelCase_ , )[0]
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = noise_pred.split(latents.shape[1] , dim=1 )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = variance_pred.chunk(2 )
SCREAMING_SNAKE_CASE : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
SCREAMING_SNAKE_CASE : str = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE : str = self.scheduler.step(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ , )[0]
# post-processing
SCREAMING_SNAKE_CASE : int = self.movq.decode(lowerCamelCase_ , force_not_quantize=lowerCamelCase_ )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
SCREAMING_SNAKE_CASE : Optional[Any] = image * 0.5 + 0.5
SCREAMING_SNAKE_CASE : Optional[Any] = image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE : str = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE : Optional[int] = self.numpy_to_pil(lowerCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase_ )
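# Hedged note (illustrative, standard classifier-free guidance): the denoising
# loop above combines the two half-batch predictions as
#   noise_pred = uncond + guidance_scale * (text - uncond)
# so guidance_scale = 1.0 reduces to the unguided prediction and larger values
# push each step further toward the hint/text-conditioned direction.
def _demo_classifier_free_guidance(guidance_scale: float = 4.0) -> torch.Tensor:
    noise_pred_uncond = torch.zeros(1, 4, 2, 2)
    noise_pred_text = torch.ones(1, 4, 2, 2)
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)  # every entry 4.0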
| 18 |
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
lowerCamelCase__ : Optional[Any] = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __A ( a_ : Optional[int] )-> Dict:
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def __A ( a_ : List[Any] , a_ : Optional[int] , a_ : Optional[int] )-> Dict:
'''simple docstring'''
return max(metric_fn(a_ , a_ ) for gt in ground_truths )
def __A ( a_ : List[Any] , a_ : Union[str, Any] , a_ : str )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Optional[Any] = []
if args.gold_data_mode == "qa":
SCREAMING_SNAKE_CASE : List[Any] = pd.read_csv(a_ , sep='''\t''' , header=a_ )
for answer_list in data[1]:
SCREAMING_SNAKE_CASE : str = ast.literal_eval(a_ )
answers.append(a_ )
else:
SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = [[reference] for reference in references]
SCREAMING_SNAKE_CASE : Dict = 0
for prediction, ground_truths in zip(a_ , a_ ):
total += 1
em += metric_max_over_ground_truths(a_ , a_ , a_ )
fa += metric_max_over_ground_truths(a_ , a_ , a_ )
SCREAMING_SNAKE_CASE : Any = 100.0 * em / total
SCREAMING_SNAKE_CASE : Optional[int] = 100.0 * fa / total
logger.info(F"F1: {fa:.2f}" )
logger.info(F"EM: {em:.2f}" )
def __A ( a_ : Any , a_ : Any , a_ : List[Any] )-> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = args.k
SCREAMING_SNAKE_CASE : Tuple = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Union[str, Any] = [line.strip() for line in open(a_ , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE : Dict = 0
for hypo, reference in zip(a_ , a_ ):
SCREAMING_SNAKE_CASE : Optional[int] = set(hypo.split('''\t''' )[:k] )
SCREAMING_SNAKE_CASE : List[str] = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
SCREAMING_SNAKE_CASE : Dict = 100.0 * em / total
logger.info(F"Precision@{k}: {em: .2f}" )
def __A ( a_ : Any , a_ : List[str] , a_ : str )-> int:
'''simple docstring'''
def strip_title(a_ : Optional[Any] ):
if title.startswith('''"''' ):
SCREAMING_SNAKE_CASE : Tuple = title[1:]
if title.endswith('''"''' ):
SCREAMING_SNAKE_CASE : Any = title[:-1]
return title
SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ , )['''input_ids'''].to(args.device )
SCREAMING_SNAKE_CASE : Any = rag_model.rag.question_encoder(a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = question_enc_outputs[0]
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever(
a_ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE : Any = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
SCREAMING_SNAKE_CASE : Dict = []
for docs in all_docs:
SCREAMING_SNAKE_CASE : List[Any] = [strip_title(a_ ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(a_ ) )
return provenance_strings
def __A ( a_ : List[Any] , a_ : int , a_ : str )-> Tuple:
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a_ , return_tensors='''pt''' , padding=a_ , truncation=a_ )
SCREAMING_SNAKE_CASE : Dict = inputs_dict.input_ids.to(args.device )
SCREAMING_SNAKE_CASE : Any = inputs_dict.attention_mask.to(args.device )
SCREAMING_SNAKE_CASE : Tuple = rag_model.generate( # rag_model overwrites generate
a_ , attention_mask=a_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=a_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.generator_tokenizer.batch_decode(a_ , skip_special_tokens=a_ )
if args.print_predictions:
for q, a in zip(a_ , a_ ):
logger.info('''Q: {} - A: {}'''.format(a_ , a_ ) )
return answers
def __A ( )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=a_ , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=a_ , choices=['''exact''', '''compressed''', '''legacy'''] , type=a_ , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=a_ , type=a_ , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=a_ , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=a_ , type=a_ , required=a_ , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=a_ , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=a_ , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=a_ , type=a_ , required=a_ , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=a_ , type=a_ , required=a_ , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=a_ , choices=['''qa''', '''ans'''] , help=(
'''Format of the gold data file'''
'''qa - a single line in the following format: question [tab] answer_list'''
'''ans - a single line of the gold file contains the expected answer string'''
) , )
parser.add_argument(
'''--predictions_path''' , type=a_ , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
'''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=a_ , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=a_ , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=a_ , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=a_ , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
'''--print_docs''' , action='''store_true''' , help='''If True, prints docs retried while generating.''' , )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
SCREAMING_SNAKE_CASE : Dict = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def __A ( a_ : Optional[Any] )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = {}
if args.model_type is None:
SCREAMING_SNAKE_CASE : List[str] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : List[str] = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs
if args.index_name is not None:
SCREAMING_SNAKE_CASE : Tuple = args.index_name
if args.index_path is not None:
SCREAMING_SNAKE_CASE : List[Any] = args.index_path
else:
SCREAMING_SNAKE_CASE : str = BartForConditionalGeneration
SCREAMING_SNAKE_CASE : Optional[int] = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , a_ )
SCREAMING_SNAKE_CASE : int = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
SCREAMING_SNAKE_CASE : str = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(a_ , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(a_ ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
SCREAMING_SNAKE_CASE : Dict = RagRetriever.from_pretrained(a_ , **a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class.from_pretrained(a_ , retriever=a_ , **a_ )
model.retriever.init_retrieval()
else:
SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(a_ , **a_ )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
SCREAMING_SNAKE_CASE : Dict = []
for line in tqdm(a_ ):
questions.append(line.strip() )
if len(a_ ) == args.eval_batch_size:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) + '''\n''' )
preds_file.flush()
SCREAMING_SNAKE_CASE : Union[str, Any] = []
if len(a_ ) > 0:
SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(a_ , a_ , a_ )
preds_file.write('''\n'''.join(a_ ) )
preds_file.flush()
score_fn(a_ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
lowerCamelCase__ : List[str] = get_args()
main(args)
| 18 | 1 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
lowerCamelCase__ : Tuple = logging.getLogger(__name__)
torch.set_grad_enabled(False)
lowerCamelCase__ : List[Any] = "cuda" if torch.cuda.is_available() else "cpu"
def __A ( a_ : str , a_ : Optional[Any]=1_00 , a_ : Dict=" " )-> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = text.split(a_ )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(a_ ) , a_ )]
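# Hedged worked example (illustrative): with n=3 and the default separator, the
# splitter above emits consecutive windows of three whitespace-separated words.
def _demo_split_text() -> list[str]:
    words = "one two three four five".split(" ")
    return [" ".join(words[i : i + 3]).strip() for i in range(0, len(words), 3)]
    # -> ["one two three", "four five"]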
def __A ( a_ : dict )-> dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = [], []
for title, text in zip(documents['''title'''] , documents['''text'''] ):
if text is not None:
for passage in split_text(a_ ):
titles.append(title if title is not None else '''''' )
texts.append(a_ )
return {"title": titles, "text": texts}
def __A ( a_ : dict , a_ : DPRContextEncoder , a_ : DPRContextEncoderTokenizerFast )-> dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = ctx_tokenizer(
documents['''title'''] , documents['''text'''] , truncation=a_ , padding='''longest''' , return_tensors='''pt''' )['''input_ids''']
SCREAMING_SNAKE_CASE : Optional[Any] = ctx_encoder(input_ids.to(device=a_ ) , return_dict=a_ ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def __A ( a_ : "RagExampleArguments" , a_ : "ProcessingArguments" , a_ : "IndexHnswArguments" , )-> Optional[int]:
'''simple docstring'''
logger.info('''Step 1 - Create the dataset''' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset(
'''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
SCREAMING_SNAKE_CASE : int = dataset.map(a_ , batched=a_ , num_proc=processing_args.num_proc )
# And compute the embeddings
SCREAMING_SNAKE_CASE : List[Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=a_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
SCREAMING_SNAKE_CASE : List[str] = Features(
{'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space
SCREAMING_SNAKE_CASE : str = dataset.map(
partial(a_ , ctx_encoder=a_ , ctx_tokenizer=a_ ) , batched=a_ , batch_size=processing_args.batch_size , features=a_ , )
# And finally save your dataset
SCREAMING_SNAKE_CASE : Dict = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' )
dataset.save_to_disk(a_ )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
SCREAMING_SNAKE_CASE : Optional[Any] = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('''embeddings''' , custom_index=a_ )
# And save the index
SCREAMING_SNAKE_CASE : Tuple = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' )
dataset.get_index('''embeddings''' ).save(a_ )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
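    # Hedged usage sketch (an assumption, not executed by this script): retrieval
    # against the saved index embeds a question with the DPR *question* encoder
    # and asks the dataset for its nearest passages, e.g.
    # from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast
    # q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    # q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    # question_embedding = q_encoder(**q_tokenizer("What does Moses' rod turn into ?", return_tensors="pt"))[0][0].numpy()
    # scores, passages = dataset.get_nearest_examples("embeddings", question_embedding, k=5)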
@dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = field(
default=str(Path(_UpperCAmelCase ).parent / """test_run""" / """dummy-kb""" / """my_knowledge_dataset.csv""" ) , metadata={"""help""": """Path to a tab-separated csv file with columns 'title' and 'text'"""} , )
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."""} , )
UpperCamelCase = field(
default="""facebook/rag-sequence-nq""" , metadata={"""help""": """The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"""} , )
UpperCamelCase = field(
default="""facebook/dpr-ctx_encoder-multiset-base""" , metadata={
"""help""": (
"""The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"""
""" 'facebook/dpr-ctx_encoder-multiset-base'"""
)
} , )
UpperCamelCase = field(
default=str(Path(_UpperCAmelCase ).parent / """test_run""" / """dummy-kb""" ) , metadata={"""help""": """Path to a directory where the dataset passages and the index will be saved"""} , )
@dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": """The number of processes to use to split the documents into passages. Default is single process."""
} , )
UpperCamelCase = field(
default=16 , metadata={
"""help""": """The batch size to use when computing the passages embeddings using the DPR context encoder."""
} , )
@dataclass
class lowercase__:
'''simple docstring'''
UpperCamelCase = field(
default=7_68 , metadata={"""help""": """The dimension of the embeddings to pass to the HNSW Faiss index."""} , )
UpperCamelCase = field(
default=1_28 , metadata={
"""help""": (
"""The number of bi-directional links created for every new element during the HNSW index construction."""
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
lowerCamelCase__ : Tuple = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
lowerCamelCase__ : Optional[int] = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 18 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {"vocab_file": "vocab.json"}
lowerCamelCase__ : Dict = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
lowerCamelCase__ : Optional[Any] = {"mgp-str": 27}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Optional[int]="[GO]" , lowerCamelCase_ :int="[GO]" , lowerCamelCase_ :str="[s]" , lowerCamelCase_ :Dict="[GO]" , **lowerCamelCase_ :List[str] ) -> Tuple:
'''simple docstring'''
super().__init__(
unk_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , **lowerCamelCase_ , )
with open(lowerCamelCase_ , encoding='''utf-8''' ) as vocab_handle:
SCREAMING_SNAKE_CASE : int = json.load(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in self.vocab.items()}
@property
def __lowerCAmelCase ( self :int ) -> Dict:
'''simple docstring'''
return len(self.vocab )
def __lowerCAmelCase ( self :List[str] ) -> Dict:
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Optional[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for s in text:
            char_tokens.extend(s )
return char_tokens
def __lowerCAmelCase ( self :Dict , lowerCamelCase_ :Tuple ) -> Optional[int]:
'''simple docstring'''
return self.vocab.get(lowerCamelCase_ , self.vocab.get(self.unk_token ) )
def __lowerCAmelCase ( self :List[str] , lowerCamelCase_ :Dict ) -> Optional[int]:
'''simple docstring'''
return self.decoder.get(lowerCamelCase_ )
def __lowerCAmelCase ( self :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(lowerCamelCase_ ) )
return
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + '''\n''' )
return (vocab_file,)
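# Illustrative usage sketch (not part of the file above; assumes the upstream
# class name MgpstrTokenizer and the public checkpoint):
# from transformers import MgpstrTokenizer
# tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
# encoded = tokenizer("ticket")  # character-level: one id per input character
# print(encoded["input_ids"])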
| 18 | 1 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
lowerCamelCase__ : Optional[int] = "\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n"
lowerCamelCase__ : Union[str, Any] = "\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"
lowerCamelCase__ : Any = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the CUAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\n 'aupr': Area Under the Precision-Recall curve\n 'prec_at_80_recall': Precision at 80% recall\n 'prec_at_90_recall': Precision at 90% recall\nExamples:\n >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> cuad_metric = datasets.load_metric(\"cuad\")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__( datasets.Metric ):
'''simple docstring'''
def __lowerCAmelCase ( self :str ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://www.atticusprojectai.org/cuad'''] , reference_urls=['''https://www.atticusprojectai.org/cuad'''] , )
def __lowerCAmelCase ( self :Any , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :List[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
SCREAMING_SNAKE_CASE : Tuple = [
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
SCREAMING_SNAKE_CASE : List[Any] = evaluate(dataset=lowerCamelCase_ , predictions=lowerCamelCase_ )
return score
| 18 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : str = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """luke"""
def __init__( self :Optional[int] , lowerCamelCase_ :Union[str, Any]=5_02_67 , lowerCamelCase_ :int=50_00_00 , lowerCamelCase_ :Tuple=7_68 , lowerCamelCase_ :List[str]=2_56 , lowerCamelCase_ :Dict=12 , lowerCamelCase_ :Optional[int]=12 , lowerCamelCase_ :Optional[Any]=30_72 , lowerCamelCase_ :List[Any]="gelu" , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :List[str]=5_12 , lowerCamelCase_ :Optional[int]=2 , lowerCamelCase_ :Tuple=0.0_2 , lowerCamelCase_ :Optional[int]=1E-12 , lowerCamelCase_ :List[Any]=True , lowerCamelCase_ :List[str]=None , lowerCamelCase_ :str=1 , lowerCamelCase_ :Any=0 , lowerCamelCase_ :str=2 , **lowerCamelCase_ :List[Any] , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : List[str] = entity_vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = entity_emb_size
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[int] = use_entity_aware_attention
SCREAMING_SNAKE_CASE : List[str] = classifier_dropout
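# Illustrative usage sketch (not part of the file above; assumes the upstream
# class names LukeConfig and LukeModel):
# from transformers import LukeConfig, LukeModel
# config = LukeConfig(entity_emb_size=256, use_entity_aware_attention=True)
# model = LukeModel(config)  # entity embeddings (256-d) are projected up to hidden_size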
| 18 | 1 |
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but uses more memory.
lowerCamelCase__ : Optional[Any] = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCamelCase__ : Optional[int] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase__ : Optional[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def __A ( a_ : str , a_ : str )-> tuple[str, float]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = len([g for position, g in enumerate(a_ ) if g == main_target[position]] )
    return (item, float(score ))
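# Worked example (illustrative; the function above is the fitness evaluation
# step): every character of "banana" matches the first six characters of the
# target "bananas", so evaluating "banana" against "bananas" yields
# ("banana", 6.0).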
def __A ( a_ : str , a_ : str )-> tuple[str, str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = random.randint(0 , len(a_ ) - 1 )
SCREAMING_SNAKE_CASE : str = parent_a[:random_slice] + parent_a[random_slice:]
SCREAMING_SNAKE_CASE : Dict = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
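# Worked example (illustrative of single-point crossover): with random_slice == 3,
# crossing "banana" and "orange" gives "ban" + "nge" == "bannge" and
# "ora" + "ana" == "oraana".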
def __A ( a_ : str , a_ : list[str] )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = list(a_ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
SCREAMING_SNAKE_CASE : Any = random.choice(a_ )
return "".join(a_ )
def __A ( a_ : tuple[str, float] , a_ : list[tuple[str, float]] , a_ : list[str] , )-> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = []
# Generate more children proportionally to the fitness score.
SCREAMING_SNAKE_CASE : List[str] = int(parent_a[1] * 1_00 ) + 1
SCREAMING_SNAKE_CASE : Optional[Any] = 10 if child_n >= 10 else child_n
for _ in range(a_ ):
SCREAMING_SNAKE_CASE : List[str] = population_score[random.randint(0 , a_ )][0]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = crossover(parent_a[0] , a_ )
# Append new string to the population list.
pop.append(mutate(a_ , a_ ) )
pop.append(mutate(a_ , a_ ) )
return pop
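# Numeric example (illustrative): a parent with score 0.25 requests
# int(0.25 * 100) + 1 == 26 children, capped to 10 loop iterations above; each
# iteration appends two mutated children, so that parent contributes 20 strings.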
def __A ( a_ : str , a_ : list[str] , a_ : bool = True )-> tuple[int, int, str]:
'''simple docstring'''
if N_POPULATION < N_SELECTED:
SCREAMING_SNAKE_CASE : List[Any] = F"{N_POPULATION} must be bigger than {N_SELECTED}"
raise ValueError(a_ )
    # Verify that the target contains no genes besides the ones inside the genes variable.
SCREAMING_SNAKE_CASE : List[str] = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
SCREAMING_SNAKE_CASE : str = F"{not_in_genes_list} is not in genes list, evolution cannot converge"
raise ValueError(a_ )
# Generate random starting population.
SCREAMING_SNAKE_CASE : Tuple = []
for _ in range(a_ ):
population.append(''''''.join([random.choice(a_ ) for i in range(len(a_ ) )] ) )
    # Just some logs to know what the algorithm is doing.
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(a_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
SCREAMING_SNAKE_CASE : int = [evaluate(a_ , a_ ) for item in population]
# Check if there is a matching evolution.
        SCREAMING_SNAKE_CASE : List[Any] = sorted(a_ , key=lambda x : x[1] , reverse=a_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"\nGeneration: {generation}"
F"\nTotal Population:{total_population}"
F"\nBest score: {population_score[0][1]}"
F"\nBest string: {population_score[0][0]}" )
# Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
SCREAMING_SNAKE_CASE : Optional[Any] = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(a_ )
# Normalize population score to be between 0 and 1.
SCREAMING_SNAKE_CASE : Optional[int] = [
(item, score / len(a_ )) for item, score in population_score
]
        # This is the selection step.
for i in range(a_ ):
            population.extend(select(population_score[int(i )] , a_ , a_ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(a_ ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCamelCase__ : Dict = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowerCamelCase__ : int = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 18 |
"""simple docstring"""
def __A ( a_ : list , a_ : int , a_ : int = 0 , a_ : int = 0 )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = right or len(a_ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(a_ , a_ , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
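# Illustrative calls (the function is defined as __A above but recurses as
# `search`, so the original name was presumably `search`):
# search([0, 5, 7, 10, 15], 15) -> 4   (matched at the right pointer)
# search([0, 5, 7, 10, 15], 6) -> -1   (pointers cross without a match)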
| 18 | 1 |