from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
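# Usage note (a sketch, assuming the standard transformers package layout):
# with the lazy module installed into sys.modules, the tokenizer class is only
# materialized on first attribute access, e.g.
#
#     from transformers.models.wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
#
# which triggers the deferred import of tokenization_wav2vec2_phoneme.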
from __future__ import annotations

import typing
from collections.abc import Iterable

import numpy as np

Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Compute the Euclidean distance between two vectors using NumPy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Compute the Euclidean distance between two vectors in pure Python."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )

    benchmark()
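    # Hypothetical sanity check (not part of the original benchmark): both
    # implementations should agree; sqrt(3^2 + 3^2 + 3^2) = sqrt(27) ~ 5.196.
    import math

    assert math.isclose(euclidean_distance([1, 2, 3], [4, 5, 6]), math.sqrt(27))
    assert math.isclose(euclidean_distance_no_np([1, 2, 3], [4, 5, 6]), math.sqrt(27))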
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : str = BlenderbotSmallConfig
__UpperCAmelCase : int = {}
__UpperCAmelCase : List[str] = 'gelu'
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=False , _a=99 , _a=32 , _a=2 , _a=4 , _a=37 , _a=0.1 , _a=0.1 , _a=20 , _a=2 , _a=1 , _a=0 , ):
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = eos_token_id
__a = pad_token_id
__a = bos_token_id
def __UpperCAmelCase ( self ):
__a = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__a = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__a = tf.concat([input_ids, eos_tensor] , axis=1 )
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__a = prepare_blenderbot_small_inputs_dict(_a , _a , _a )
return config, inputs_dict
def __UpperCAmelCase ( self , _a , _a ):
__a = TFBlenderbotSmallModel(config=_a ).get_decoder()
__a = inputs_dict['''input_ids''']
__a = input_ids[:1, :]
__a = inputs_dict['''attention_mask'''][:1, :]
__a = inputs_dict['''head_mask''']
__a = 1
# first forward pass
__a = model(_a , attention_mask=_a , head_mask=_a , use_cache=_a )
__a , __a = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__a = ids_tensor((self.batch_size, 3) , config.vocab_size )
__a = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
__a = tf.concat([input_ids, next_tokens] , axis=-1 )
__a = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__a = model(_a , attention_mask=_a )[0]
__a = model(_a , attention_mask=_a , past_key_values=_a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__a = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__a = output_from_no_past[:, -3:, random_slice_idx]
__a = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_a , _a , rtol=1E-3 )
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : str=None , lowerCAmelCase__ : Dict=None , lowerCAmelCase__ : Optional[int]=None , lowerCAmelCase__ : str=None , ) -> Dict:
if attention_mask is None:
__a = tf.cast(tf.math.not_equal(lowerCAmelCase__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__a = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__a = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__a = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__a = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
__UpperCAmelCase : str = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
__UpperCAmelCase : Optional[Any] = (
{
'conversational': TFBlenderbotSmallForConditionalGeneration,
'feature-extraction': TFBlenderbotSmallModel,
'summarization': TFBlenderbotSmallForConditionalGeneration,
'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
'translation': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
__UpperCAmelCase : Any = True
__UpperCAmelCase : Any = False
__UpperCAmelCase : int = False
def __UpperCAmelCase ( self ):
__a = TFBlenderbotSmallModelTester(self )
__a = ConfigTester(self , config_class=_a )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_a )
@require_tokenizers
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = [
'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
' i\'m going to throw up.\nand why is that?'
]
__UpperCAmelCase : int = 'facebook/blenderbot_small-90M'
@cached_property
def __UpperCAmelCase ( self ):
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
@cached_property
def __UpperCAmelCase ( self ):
__a = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def __UpperCAmelCase ( self ):
__a = self.tokenizer(self.src_text , return_tensors='''tf''' )
__a = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=_a , )
__a = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_a )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
lowercase_ = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
lowercase_ = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
lowercase_ = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def __UpperCAmelCase ( self , _a , _a , _a=None , _a=1 , _a="binary" , _a=None ):
__a = fa_score(
_a , _a , labels=_a , pos_label=_a , average=_a , sample_weight=_a )
return {"f1": float(_a ) if score.size == 1 else score}
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _SCREAMING_SNAKE_CASE ( metaclass=__UpperCAmelCase ):
"""simple docstring"""
_a : Optional[int] = ['''note_seq''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
requires_backends(self , ["""note_seq"""] )
@classmethod
def UpperCAmelCase__( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
requires_backends(cls , ["""note_seq"""] )
@classmethod
def UpperCAmelCase__( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
requires_backends(cls , ["""note_seq"""] )
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = '▁'
__snake_case = {'vocab_file': 'sentencepiece.bpe.model'}
__snake_case = {
'vocab_file': {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
}
}
__snake_case = {
'facebook/xglm-564M': 2048,
}
class _SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
"""simple docstring"""
_a : Optional[int] = VOCAB_FILES_NAMES
_a : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_a : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : Any = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCamelCase__ , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> None:
lowercase__ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
lowercase__ : Any = 7
lowercase__ : Dict = [F'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
lowercase__ : Tuple = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase__ , )
lowercase__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase__ ) )
lowercase__ : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase__ : Optional[Any] = 1
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase__ : Any = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
lowercase__ : Optional[int] = len(self.sp_model )
lowercase__ : Union[str, Any] = {F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(lowerCamelCase__ )
lowercase__ : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Dict:
lowercase__ : Any = self.__dict__.copy()
lowercase__ : List[str] = None
lowercase__ : str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowerCamelCase__ ) -> Optional[int]:
lowercase__ : int = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowercase__ : Union[str, Any] = {}
lowercase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> List[int]:
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
lowercase__ : List[str] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase__ ))
return [1] + ([0] * len(lowerCamelCase__ )) + [1, 1] + ([0] * len(lowerCamelCase__ ))
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> List[int]:
lowercase__ : str = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def UpperCAmelCase__( self ) -> Union[str, Any]:
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def UpperCAmelCase__( self ) -> int:
lowercase__ : Optional[Any] = {self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase__( self , lowerCamelCase__ ) -> List[str]:
return self.sp_model.encode(lowerCamelCase__ , out_type=lowerCamelCase__ )
def UpperCAmelCase__( self , lowerCamelCase__ ) -> int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase__ : List[Any] = self.sp_model.PieceToId(lowerCamelCase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCAmelCase__( self , lowerCamelCase__ ) -> int:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCAmelCase__( self , lowerCamelCase__ ) -> int:
lowercase__ : Dict = """""".join(lowerCamelCase__ ).replace(lowerCamelCase__ , """ """ ).strip()
return out_string
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase__ : int = os.path.join(
lowerCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase__ , """wb""" ) as fi:
lowercase__ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase__ )
return (out_vocab_file,)
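# A minimal usage sketch (illustrative only; assumes the "facebook/xglm-564M"
# checkpoint and a transformers installation, since this module uses relative
# imports and cannot run as a standalone script):
#
#     from transformers import XGLMTokenizer
#     tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#     ids = tok("Hello world")["input_ids"]
#     print(tok.decode(ids))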
from __future__ import annotations

from typing import Dict

from ...configuration_utils import PretrainedConfig


ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
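# A small sketch of the attribute_map declared above (assumes PretrainedConfig's
# standard attribute-aliasing behavior): "num_classes" is an alias for the
# canonical "num_labels" field.
#
#     config = ErnieMConfig(num_classes=5)
#     assert config.num_labels == 5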
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
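    # Hand-checked expectations for the demo above (not in the original):
    # the shortest 1 -> 4 path is 1 -> 3 -> 4 with weight 5 + 6 = 11, and the
    # shortest 0 -> 3 path is 0 -> 2 -> 3 with weight 9 + 7 = 16.
    assert graph.show_min(1, 4) == 11
    assert graph.show_min(0, 3) == 16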
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}


class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
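# A quick sketch of how the ONNX config above is used (assumes transformers'
# OnnxConfig constructor, which takes a model config):
#
#     onnx_config = DistilBertOnnxConfig(DistilBertConfig())
#     print(onnx_config.inputs)  # input_ids / attention_mask with dynamic batch and sequence axes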
from maths.prime_check import is_prime


def twin_prime(number: int) -> int:
    """Return number + 2 if (number, number + 2) is a twin-prime pair, else -1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
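    # Hand-checked examples (not in the original): 3 and 5 are twin primes,
    # while 4 is not prime, so no twin pair starts there.
    assert twin_prime(3) == 5
    assert twin_prime(4) == -1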
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
def average_absolute_deviation(nums: list[int]) -> float:
    """Return the average absolute deviation of a list of numbers."""
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
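    # Hand-checked example (not in the original): for [1, 2, 3, 4] the mean is
    # 2.5 and the absolute deviations are 1.5, 0.5, 0.5, 1.5, which average 1.0.
    assert average_absolute_deviation([1, 2, 3, 4]) == 1.0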
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import math
from typing import Callable, List, Optional, Union

import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler


def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect: [int], min: [int], max: [int]):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )


def add_overlap_rect(rect: [int], overlap: int, image_size: [int]):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    divisor = n % d
    return n - divisor


class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, low_res_scheduler, scheduler, max_noise_level=350):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt,
        image,
        num_inference_steps=75,
        guidance_scale=9.0,
        noise_level=50,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        callback=None,
        callback_steps=1,
        tile_size=128,
        tile_border=32,
        original_image_slice=32,
    ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image


def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
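# A minimal usage sketch (illustrative; "google/pix2struct-base" is assumed to
# be an available Hub checkpoint, and this module cannot run standalone since
# it uses relative imports):
#
#     from transformers import Pix2StructProcessor
#     from PIL import Image
#
#     processor = Pix2StructProcessor.from_pretrained("google/pix2struct-base")
#     inputs = processor(images=Image.new("RGB", (256, 256)), text="a caption", return_tensors="pt")
#     # for non-VQA checkpoints, the text is routed to decoder_input_ids /
#     # decoder_attention_mask, as implemented above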
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))

    print("Googling.....")

    url = f"https://www.google.com/search?q={query}&num=100"

    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )

    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]

    webbrowser.open(link)
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DetrImageProcessor


class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        # computes the height and width DetrImageProcessor is expected to
        # produce from its shortest-edge resize logic
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
from math import pi, sqrt


def gamma(num: float) -> float:
    """Compute the gamma function for integer and half-integer arguments."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
values = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}


def decimal_to_hexadecimal(decimal) -> str:
    """Convert a base-10 integer to its base-16 string representation."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal


if __name__ == "__main__":
    import doctest

    doctest.testmod()
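    # Hypothetical quick checks (not in the original); they agree with the
    # built-in hex():
    assert decimal_to_hexadecimal(255) == hex(255) == "0xff"
    assert decimal_to_hexadecimal(-256) == hex(-256) == "-0x100"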
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class _lowerCamelCase( _a ):
@require_torch
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Optional[Any] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_lowercase : Union[str, Any] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_lowercase : List[str] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_lowercase : Union[str, Any] = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(lowerCamelCase)
BertModel.from_pretrained(lowerCamelCase)
BertTokenizer.from_pretrained(lowerCamelCase)
pipeline(task='fill-mask', model=lowerCamelCase)
# baseline - just load from_pretrained with normal network
_lowercase : Any = [sys.executable, '-c', '\n'.join([load, run, mock])]
# should succeed
_lowercase : List[str] = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_lowercase : Any = '1'
_lowercase : List[Any] = subprocess.run(lowerCamelCase, env=lowerCamelCase, check=lowerCamelCase, capture_output=lowerCamelCase)
self.assertEqual(result.returncode, 0, result.stderr)
self.assertIn('success', result.stdout.decode())
    @require_torch
    def test_offline_mode_no_internet(self):
        # here the socket mock fakes flaky internet instead of a hard offline error
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        env = self.get_env()
        env['TRANSFORMERS_OFFLINE'] = '1'
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            'You cannot infer task automatically within `pipeline` when using offline mode',
            result.stderr.decode().replace('\n', ''),
        )
    @require_torch
    def test_offline_model_dynamic_model(self):
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
| 354 | 0 |
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=[30, 30], patch_size=2, num_channels=3, is_training=True,
        use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10,
        initializer_range=0.02, num_labels=3, scope=None, n_targets=8, num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return YolosConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False,
            initializer_range=self.initializer_range, num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
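
# With the tester defaults above: (30 // 2) * (30 // 2) = 225 patches, so
# expected_seq_len = 225 + 1 (the [CLS] token) + 10 (detection tokens) = 236.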
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False

    # special case for head models
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
| 15 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        # the MST may report an edge in either orientation, so accept both
        assert edge in result or reverse in result
| 487 | 0 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def split_text(text: str, n: int = 100, character: str = ' ') -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
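
# Sketch of the chunking above with a hypothetical input: a 250-word string split
# on every 100th space yields three passages of 100, 100 and 50 words respectively.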
def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents['title'], documents['text']):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else '')
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents['title'], documents['text'], truncation=True, padding='longest', return_tensors='pt'
    )['input_ids']
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
) -> None:
######################################
logger.info('Step 1 - Create the dataset')
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
    dataset = load_dataset(
        'csv', data_files=[rag_example_args.csv_path], split='train', delimiter='\t', column_names=['title', 'text']
    )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)
# And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {'text': Value('string'), 'title': Value('string'), 'embeddings': Sequence(Value('float32'))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )
# And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, 'my_knowledge_dataset')
    dataset.save_to_disk(passages_path)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('Step 2 - Index the dataset')
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index('embeddings', custom_index=index)
# And save the index
    index_path = os.path.join(rag_example_args.output_dir, 'my_knowledge_dataset_hnsw_index.faiss')
    dataset.get_index('embeddings').save(index_path)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
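
# Typical invocation (both paths below are placeholders, not files shipped with this script):
#   python use_own_knowledge_dataset.py \
#       --csv_path path/to/my_knowledge_dataset.csv \
#       --output_dir path/to/my_knowledge_dataset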
| 706 |
from ..utils import DummyObject, requires_backends


class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ['torch', 'scipy']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'scipy'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'scipy'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'scipy'])
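
# A dummy class like the one above exists only to fail loudly: without torch and
# scipy installed, instantiating it raises an ImportError from requires_backends
# instead of a confusing error much later.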
| 6 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
A_ = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
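
# Example command line (checkpoint and output paths are placeholders):
#   python convert_hubert_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/hubert_checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --not_finetuned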
| 609 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0,
        eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 609 | 1 |
import math
def is_prime(number: int) -> bool:
    """Checks whether ``number`` is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
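
# Quick sanity checks for the 6k +/- 1 trial division above:
#   is_prime(2) -> True, is_prime(97) -> True, is_prime(25) -> False (5 * 5)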
def solution(ratio: float = 0.1) -> int:
    """
    Returns the side length of the square spiral at which the ratio of primes
    along both diagonals first falls below ``ratio``.
    """
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
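
# A cheap hand-computed check of the loop above (not an official test value):
# solution(0.5) == 11, the first odd side length where primes make up less than
# half of the diagonal values.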
if __name__ == "__main__":
import doctest
doctest.testmod()
| 238 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 238 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0,
        eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
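
# For the default task, the property above yields:
#   OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#                ("attention_mask", {0: "batch", 1: "sequence"})])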
| 250 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 250 | 1 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 390 |
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    """A pure implementation of patience sort algorithm in Python."""
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stacks = Stack([element])
        i = bisect_left(stacks, new_stacks)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stacks)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
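
# Example run of the function above:
#   patience_sort([1, 9, 5, 21, 17, 6]) -> [1, 5, 6, 9, 17, 21]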
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(patience_sort(unsorted))
| 390 | 1 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
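
# The test above guards a regression: `accelerator.prepare` wraps the optimizer
# in an AcceleratedOptimizer, and such wrapper classes are easy to break for
# pickle, so a full pickle round-trip is exercised.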
| 10 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
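# Usage sketch (added for illustration):
# config = TrOCRConfig()
# config.hidden_size  # -> 1024, resolved to `d_model` through `attribute_map`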
| 10 | 1 |
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
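# Illustration (added): analyze_results flags objects present on only one side.
# analyze_results({"none": ["A", "B"]}, {"none": ["A"]})
# -> ["Differences for base imports:",
#     "  B in _import_structure but not in TYPE_HINT."]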
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 438 |
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    """Constructs an Ernie-M tokenizer, based on SentencePiece."""

    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        """Replaces special characters using their SentencePiece-safe mapping."""
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))
    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        """Tokenize a string, splitting pieces on Chinese characters, punctuation and digit boundaries."""
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
    def is_ch_char(self, char):
        """Checks whether `char` is a Chinese character."""
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        """Checks whether `char` is an English letter."""
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        """Checks whether `char` is a punctuation character."""
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        """Checks whether `char` is a whitespace character."""
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx
    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
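# Usage sketch (added for illustration; the file names below are the default
# resource names, not files shipped with this module):
# tokenizer = ErnieMTokenizer(
#     sentencepiece_model_ckpt="sentencepiece.bpe.model", vocab_file="vocab.txt"
# )
# tokenizer("He said: 你好")["input_ids"]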
| 438 | 1 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False
class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
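        # Note (added): for a shared beta schedule and timestep, both schedulers
        # implement the same forward-diffusion step,
        #     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise,
        # so the noisy inputs and the model predictions are expected to match.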
| 214 |
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetch GitHub info of the authenticated user using the requests module."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f"""{key}: {value}""")
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
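# Example session (added for illustration; requires a valid personal access
# token exported as USER_TOKEN; field values depend on your account):
#     $ USER_TOKEN=<token> python fetch_github_info.py
#     login: <your username>
#     id: <your numeric id>
#     ...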
| 214 | 1 |
from manim import *
class CheckpointLoadingScene(Scene):
    """Animates the weights of a single checkpoint shard being loaded into memory."""

    def construct(self):
_lowercase: Optional[int] = Rectangle(height=0.5 , width=0.5)
_lowercase: Optional[Any] = Rectangle(height=0.4_6 , width=0.4_6).set_stroke(width=0)
_lowercase: Optional[Any] = [mem.copy() for i in range(6)]
_lowercase: Union[str, Any] = [mem.copy() for i in range(6)]
_lowercase: str = VGroup(*_UpperCamelCase).arrange(_UpperCamelCase , buff=0)
_lowercase: Union[str, Any] = VGroup(*_UpperCamelCase).arrange(_UpperCamelCase , buff=0)
_lowercase: Union[str, Any] = VGroup(_UpperCamelCase , _UpperCamelCase).arrange(_UpperCamelCase , buff=0)
_lowercase: Any = Text("CPU" , font_size=24)
_lowercase: List[Any] = Group(_UpperCamelCase , _UpperCamelCase).arrange(_UpperCamelCase , buff=0.5 , aligned_edge=_UpperCamelCase)
cpu.move_to([-2.5, -0.5, 0])
self.add(_UpperCamelCase)
_lowercase: List[Any] = [mem.copy() for i in range(4)]
_lowercase: Tuple = VGroup(*_UpperCamelCase).arrange(_UpperCamelCase , buff=0)
_lowercase: Optional[int] = Text("GPU" , font_size=24)
_lowercase: Any = Group(_UpperCamelCase , _UpperCamelCase).arrange(_UpperCamelCase , buff=0.5 , aligned_edge=_UpperCamelCase)
gpu.move_to([-1, -1, 0])
self.add(_UpperCamelCase)
_lowercase: List[str] = [mem.copy() for i in range(6)]
_lowercase: Optional[int] = VGroup(*_UpperCamelCase).arrange(_UpperCamelCase , buff=0)
_lowercase: Any = Text("Model" , font_size=24)
_lowercase: List[Any] = Group(_UpperCamelCase , _UpperCamelCase).arrange(_UpperCamelCase , buff=0.5 , aligned_edge=_UpperCamelCase)
model.move_to([3, -1.0, 0])
self.add(_UpperCamelCase)
_lowercase: Dict = []
for i, rect in enumerate(_UpperCamelCase):
rect.set_stroke(_UpperCamelCase)
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_lowercase: Any = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3).set_stroke(width=0.0).set_fill(_UpperCamelCase , opacity=0.7)
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.0_2 , direction=_UpperCamelCase)
cpu_target.set_x(cpu_target.get_x() + 0.1)
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=_UpperCamelCase , buff=0.0)
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=_UpperCamelCase , buff=0.0)
self.add(_UpperCamelCase)
cpu_targs.append(_UpperCamelCase)
_lowercase: Dict = [mem.copy() for i in range(6)]
_lowercase: Tuple = VGroup(*_UpperCamelCase).arrange(_UpperCamelCase , buff=0)
_lowercase: Tuple = Text("Loaded Checkpoint" , font_size=24)
_lowercase: Union[str, Any] = Group(_UpperCamelCase , _UpperCamelCase).arrange(_UpperCamelCase , aligned_edge=_UpperCamelCase , buff=0.4)
checkpoint.move_to([3, 0.5, 0])
_lowercase: Tuple = Square(side_length=2.2)
key.move_to([-5, 2, 0])
_lowercase: Dict = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
self.add(_UpperCamelCase , _UpperCamelCase)
_lowercase: Dict = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(_UpperCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left())
_lowercase: List[Any] = MarkupText(
f"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0])
self.play(Write(_UpperCamelCase) , Write(_UpperCamelCase))
self.play(Write(_UpperCamelCase , run_time=1) , Create(_UpperCamelCase , run_time=1))
_lowercase: Any = []
_lowercase: Tuple = []
for i, rect in enumerate(_UpperCamelCase):
_lowercase: Any = fill.copy().set_fill(_UpperCamelCase , opacity=0.7)
target.move_to(_UpperCamelCase)
first_animations.append(GrowFromCenter(_UpperCamelCase , run_time=1))
_lowercase: Tuple = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1])
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5])
second_animations.append(MoveToTarget(_UpperCamelCase , run_time=1.5))
self.play(*_UpperCamelCase)
self.play(*_UpperCamelCase)
self.wait()
| 719 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
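# Usage sketch (added for illustration; downloading the checkpoint requires
# network or cache access):
# model = FlaxAutoModel.from_pretrained("bert-base-cased")
# model.config.model_type  # -> "bert", dispatched through FLAX_MODEL_MAPPING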
| 206 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", []) or []
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
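# Usage sketch (added for illustration; requires network or cache access):
# tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
# tokenizer.build_inputs_with_special_tokens([5, 6])  # -> [sep_id, 5, 6]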
| 192 |
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    """
    Return the number of different hollow square laminae that can be formed
    using up to `limit` tiles (Project Euler problem 173).
    """
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
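    # Sanity check (added) from the Project Euler 173 statement: with up to
    # one hundred tiles, exactly forty-one different square laminae exist.
    assert solution(100) == 41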
| 192 | 1 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = """ def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
"""
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        black_mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=black_mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
        localized_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
        link_unchanged_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 709 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class VanConfig(PretrainedConfig):
    """Configuration for a VAN (Visual Attention Network) model."""

    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
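
# Minimal usage sketch (illustrative; the values below are just the defaults
# declared above):
#   config = VanConfig(drop_path_rate=0.1)
#   assert config.hidden_sizes == [64, 128, 320, 512] and config.depths == [3, 3, 12, 3]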
| 66 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
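
# Behavior sketch of the guards above (assumed, following diffusers' dummy-object
# convention): when torch/transformers are missing, the wildcard import substitutes
# placeholder classes that raise an informative error only on instantiation, so
# importing this subpackage itself never fails.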
| 385 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Return a mapping from utf-8 bytes to printable unicode strings, avoiding
    whitespace/control characters that would confuse the BPE code."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
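

# Quick sanity check for the two helpers above (values verified by hand):
#   get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}
#   len(bytes_to_unicode()) == 256  # every byte gets a printable unicode stand-in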
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
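
# Illustration of the padding rule above (hand-worked, hypothetical values): with
# padding_side="right" and an input padded from length 3 to 5, a global attention
# mask [1, 0, 0] becomes [1, 0, 0, -1, -1]. The pad positions get -1 because 0
# already means "local attention" and cannot double as "ignore".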
| 462 | 0 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs)

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)
        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows
        return written
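
# Minimal round-trip sketch (assumed usage of the two classes above; the sqlite
# URI is illustrative):
#   ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
#   SqlDatasetWriter(ds, "my_table", "sqlite:///example.db").write()
#   round_trip = SqlDatasetReader("SELECT * FROM my_table", "sqlite:///example.db").read()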
| 169 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
| 169 | 1 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []

no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}

payload = [
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': f'🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results',
'''emoji''': True,
},
}
]
total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f'{line["duration"]:.4f}'
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_filesafailed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
print(f'### {message}')
else:
    message = "No failed tests! 🤗"
print(f'## {message}')
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
        md_report = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
        action_button = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': f'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
payload.append(action_button)
    date_report = {
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': f'Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}',
}
],
}
payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""

            payload = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': f'Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```',
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
| 422 |
"""simple docstring"""
A = 8.31_4462 # Unit - J mol-1 K-1
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: float , lowerCamelCase_: float , lowerCamelCase_: float ):
"""simple docstring"""
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: float , lowerCamelCase_: float , lowerCamelCase_: float ):
"""simple docstring"""
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
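
# Hand-checked example: 1 mol at 273.15 K in 0.0224 m^3 gives
# pressure_of_gas_system(1, 273.15, 0.0224) ≈ 1.01e5 Pa, i.e. about 1 atm.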
| 449 | 0 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
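
# This module is installed as the `transformers-cli` console script, so e.g.
# `transformers-cli env` dispatches to the EnvironmentCommand registered above.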
| 180 |
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return indices of the two entries of a sorted (ascending) list `nums` that sum to `target`, or []."""
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{two_pointer([2, 7, 11, 15], 9) = }")
| 180 | 1 |
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    # NB: argparse's type=bool treats any non-empty string as True
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
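
# Launched once per process, e.g. (illustrative):
#   torchrun --nproc_per_node=2 this_script.py --streaming True --num_workers 2
# Each rank then verifies that split_dataset_by_node handed it exactly its
# expected share of the NUM_SHARDS * NUM_ITEMS_PER_SHARD rows.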
| 264 |
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def UpperCamelCase ( _A = "AAPL" ) -> str:
lowercase : Optional[Any] = F"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
lowercase : str = BeautifulSoup(requests.get(_A ).text , """html.parser""" )
lowercase : int = """My(6px) Pos(r) smartphone_Mt(6px)"""
return soup.find("""div""" , class_=class_ ).find("""span""" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
| 264 | 1 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)

    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = sd_pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)

    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = sd_pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)

    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()
        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)
        sd_pipe = self.pipeline_class(**components)
        sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = sd_pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"
        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"
        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt,
            image=self.raw_image,
            inpaint_strength=0.7,
            generator=generator,
            num_inference_steps=25,
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            num_inference_steps=25,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 701 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"FlaxElectraForCausalLM",
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
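
# Behavior sketch (assumed, matching the transformers lazy-import convention):
# sys.modules[__name__] is swapped for a _LazyModule, so attributes such as
# ElectraModel trigger the heavy torch/TF/Flax imports only on first access,
# not when the package itself is imported.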
| 288 | 0 |
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
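
# Example invocation (illustrative paths; note that `tf_checkpoint_path` should end
# in "model.ckpt" so the `[:-10]` slice above finds vocab.txt next to it):
#   python convert_tapas_checkpoint.py --task WTQ --reset_position_index_per_cell \
#     --tf_checkpoint_path tapas_wtq/model.ckpt \
#     --tapas_config_file tapas_wtq/bert_config.json \
#     --pytorch_dump_path ./tapas-wtq-pt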
| 77 |
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Return all knight moves from `position` that stay on an n x n board."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """A tour is complete when every square has been visited (no zeros left)."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0  # backtrack
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
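
# Example: open_knight_tour(5) returns a 5x5 board whose entries 1..25 trace a
# valid open knight's tour from some starting square; board sizes with no tour
# raise ValueError.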
| 195 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a :int = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
a :List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 715 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/nllb-large-en-ro": 1_024,
"facebook/nllb-200-distilled-600M": 1_024,
}
# fmt: off
a :Tuple = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def _a ( self , _a , _a , _a , _a , **_a ) -> Tuple:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
SCREAMING_SNAKE_CASE__ : Dict = src_lang
SCREAMING_SNAKE_CASE__ : Dict = self(_a , add_special_tokens=_a , return_tensors=_a , **_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.convert_tokens_to_ids(_a )
SCREAMING_SNAKE_CASE__ : List[Any] = tgt_lang_id
return inputs
def _a ( self , _a , _a = "eng_Latn" , _a = None , _a = "fra_Latn" , **_a , ) -> BatchEncoding:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = src_lang
SCREAMING_SNAKE_CASE__ : Dict = tgt_lang
return super().prepare_seqaseq_batch(_a , _a , **_a )
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _a ( self ) -> str:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _a ( self , _a ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.convert_tokens_to_ids(_a )
if self.legacy_behaviour:
SCREAMING_SNAKE_CASE__ : str = []
SCREAMING_SNAKE_CASE__ : Dict = [self.eos_token_id, self.cur_lang_code]
else:
SCREAMING_SNAKE_CASE__ : Dict = [self.cur_lang_code]
SCREAMING_SNAKE_CASE__ : Dict = [self.eos_token_id]
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
SCREAMING_SNAKE_CASE__ : int = self.convert_ids_to_tokens(self.suffix_tokens )
SCREAMING_SNAKE_CASE__ : int = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _a ( self , _a ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.convert_tokens_to_ids(_a )
if self.legacy_behaviour:
SCREAMING_SNAKE_CASE__ : List[Any] = []
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.eos_token_id, self.cur_lang_code]
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.cur_lang_code]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.eos_token_id]
SCREAMING_SNAKE_CASE__ : Any = self.convert_ids_to_tokens(self.prefix_tokens )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
SCREAMING_SNAKE_CASE__ : Tuple = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _a ( self , _a , _a = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
return
SCREAMING_SNAKE_CASE__ : Dict = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
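
# A minimal usage sketch (illustrative only; the checkpoint id is an assumption):
#
#     tokenizer = NllbTokenizerFast.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
#     inputs = tokenizer("Hello world", return_tensors="pt")
#     # When translating, the target language is injected through `forced_bos_token_id`:
#     forced_bos_token_id = tokenizer.convert_tokens_to_ids("fra_Latn")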
| 12 | 0 |
"""Resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C))."""
from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """
    Return ("Resonant frequency", f) for an LC circuit with the given
    inductance (in henries) and capacitance (in farads); both must be positive.
    """
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
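    # Illustrative call (the component values are arbitrary): L = 10 H, C = 5 F
    print(resonant_frequency(inductance=10, capacitance=5))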
| 24 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between two (torch or numpy) vectors."""
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are nearly colinear; fall back to plain linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2


def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value


class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip

    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents

    @torch.no_grad()
    def __call__(
        self,
        style_image: Union[torch.FloatTensor, PIL.Image.Image],
        content_image: Union[torch.FloatTensor, PIL.Image.Image],
        style_prompt: Optional[str] = None,
        content_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1

        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
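
# A minimal usage sketch (illustrative only; the component checkpoints you pass
# in are assumptions and must match what you actually have loaded):
#
#     pipe = CLIPGuidedImagesMixingStableDiffusion(
#         vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer,
#         unet=unet, scheduler=scheduler, feature_extractor=feature_extractor,
#     )
#     out = pipe(style_image=style_img, content_image=content_img, num_inference_steps=50)
#     out.images[0].save("mixed.png")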
| 687 | 0 |
"""Project Euler problem 7: find the 10001st prime number."""
import itertools
import math


def is_prime(number: int) -> bool:
    """Trial division using the fact that every prime > 3 has the form 6k +/- 1."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes in increasing order: 2, 3, 5, 7, ..."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Return the `nth` prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
| 243 |
import unittest

from transformers import (
    MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    TextClassificationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow

from .test_pipelines_common import ANY


# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )

    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
| 243 | 1 |
import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig


logger = logging.get_logger(__name__)


class RagTokenizer:
    """Wraps a question-encoder tokenizer and a generator tokenizer behind one interface."""

    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
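
# A minimal usage sketch (illustrative only; the checkpoint id is an assumption
# about a RAG checkpoint that ships both sub-tokenizers):
#
#     tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
#     batch = tokenizer(["who holds the record in 100m freestyle?"], return_tensors="pt")
#     decoded = tokenizer.batch_decode(batch["input_ids"])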
| 321 |
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
    "BridgeTower/bridgetower-base-itm-mlm": (
        "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
    ),
}


class BridgeTowerVisionConfig(PretrainedConfig):
    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerTextConfig(PretrainedConfig):
    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerConfig(PretrainedConfig):
    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # TODO: remove this once the Hub files are updated.
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(
        cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
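
# A minimal usage sketch:
#
#     config = BridgeTowerConfig()  # default text + vision sub-configs
#     custom = BridgeTowerConfig.from_text_vision_configs(
#         BridgeTowerTextConfig(vocab_size=50265), BridgeTowerVisionConfig(image_size=384)
#     )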
| 321 | 1 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase(self ):
A_ : Any = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
], # cummulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
], # cummulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
A_ : Dict = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
A_ : Any = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non filtered values as noted above
A_ : Dict = tf_top_k_top_p_filtering(_snake_case , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
A_ : List[Any] = output[output != -float("""inf""" )]
A_ : int = tf.cast(
tf.where(tf.not_equal(_snake_case , tf.constant(-float("""inf""" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(_snake_case , _snake_case , rtol=1e-1_2 )
tf.debugging.assert_equal(_snake_case , _snake_case )
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase , UpperCAmelCase_ ):
"""simple docstring"""
if is_tf_available():
_A : List[Any] = {
"AutoModelForCausalLM": TFAutoModelForCausalLM,
"AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
"AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
"AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
"LogitsProcessorList": TFLogitsProcessorList,
"MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
"create_tensor_fn": tf.convert_to_tensor,
"floats_tensor": floats_tensor,
"return_tensors": "tf",
}
@slow
def lowerCamelCase(self ):
A_ : List[Any] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
A_ : Tuple = 2
A_ : Union[str, Any] = 2
class SCREAMING_SNAKE_CASE ( tf.Module ):
"""simple docstring"""
def __init__(self , lowerCAmelCase_ ):
super(_snake_case , self ).__init__()
A_ : int = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=_snake_case , )
def lowerCamelCase(self , lowerCAmelCase_ , lowerCAmelCase_ ):
A_ : Optional[Any] = self.model.generate(
input_ids=_snake_case , attention_mask=_snake_case , max_new_tokens=_snake_case , return_dict_in_generate=_snake_case , )
return {"sequences": outputs["sequences"]}
A_ : Dict = [[2, 0], [102, 103]]
A_ : Any = [[1, 0], [1, 1]]
A_ : Dict = DummyModel(model=_snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_snake_case , _snake_case , signatures={"""serving_default""": dummy_model.serving} )
A_ : Optional[Any] = tf.saved_model.load(_snake_case ).signatures["""serving_default"""]
for batch_size in range(1 , len(_snake_case ) + 1 ):
A_ : str = {
"""input_ids""": tf.constant(dummy_input_ids[:batch_size] ),
"""attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ),
}
A_ : Dict = serving_func(**_snake_case )["""sequences"""]
A_ : Tuple = test_model.generate(**_snake_case , max_new_tokens=_snake_case )
tf.debugging.assert_equal(_snake_case , _snake_case )
@slow
def lowerCamelCase(self ):
A_ : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
A_ : int = 1
A_ : Dict = 2
class SCREAMING_SNAKE_CASE ( tf.Module ):
"""simple docstring"""
def __init__(self , lowerCAmelCase_ ):
super(_snake_case , self ).__init__()
A_ : List[Any] = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=_snake_case , )
def lowerCamelCase(self , lowerCAmelCase_ , lowerCAmelCase_ ):
A_ : Any = self.model.generate(
input_ids=_snake_case , attention_mask=_snake_case , max_new_tokens=_snake_case , return_dict_in_generate=_snake_case , )
return {"sequences": outputs["sequences"]}
A_ : Tuple = [[2], [102, 103]]
A_ : List[Any] = [[1], [1, 1]]
A_ : List[Any] = DummyModel(model=_snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_snake_case , _snake_case , signatures={"""serving_default""": dummy_model.serving} )
A_ : Optional[Any] = tf.saved_model.load(_snake_case ).signatures["""serving_default"""]
for input_row in range(len(_snake_case ) ):
A_ : Any = {
"""input_ids""": tf.constant([dummy_input_ids[input_row]] ),
"""attention_mask""": tf.constant([dummy_attention_masks[input_row]] ),
}
A_ : Union[str, Any] = serving_func(**_snake_case )["""sequences"""]
A_ : List[Any] = test_model.generate(**_snake_case , max_new_tokens=_snake_case )
tf.debugging.assert_equal(_snake_case , _snake_case )
@slow
@require_tensorflow_text
def lowerCamelCase(self ):
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=_snake_case )
class SCREAMING_SNAKE_CASE ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__(self ):
super().__init__()
A_ : Union[str, Any] = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(_snake_case , """spiece.model""" ) , """rb""" ).read() )
A_ : Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
def lowerCamelCase(self , lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ):
A_ : Union[str, Any] = self.tokenizer.tokenize(_snake_case )
A_ , A_ : str = text.pad_model_inputs(
_snake_case , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
A_ : Any = self.model.generate(input_ids=_snake_case , attention_mask=_snake_case )
return self.tokenizer.detokenize(_snake_case )
A_ : Union[str, Any] = CompleteSentenceTransformer()
A_ : Tuple = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" )
A_ : Optional[Any] = complete_model(_snake_case )
A_ : Dict = tf.keras.Model(_snake_case , _snake_case )
keras_model.save(_snake_case )
def lowerCamelCase(self ):
A_ : Union[str, Any] = {
"""do_sample""": True,
"""num_beams""": 1,
"""top_p""": 0.7,
"""top_k""": 10,
"""temperature""": 0.7,
}
A_ : str = 14
A_ : Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
A_ : List[Any] = """Hello, my dog is cute and"""
A_ : List[str] = tokenizer(_snake_case , return_tensors="""tf""" )
A_ : Tuple = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
A_ : int = 638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
A_ : Any = model.generate(**_snake_case , eos_token_id=_snake_case , **_snake_case )
self.assertTrue(expectation == len(generated_tokens[0] ) )
A_ : Tuple = [638, 198]
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
A_ : Any = model.generate(**_snake_case , eos_token_id=_snake_case , **_snake_case )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def lowerCamelCase(self ):
A_ : Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
A_ : Tuple = """Hugging Face is a technology company based in New York and Paris."""
A_ : str = bart_tokenizer(_snake_case , return_tensors="""tf""" ).input_ids
A_ : Any = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
A_ : Dict = bart_model.generate(_snake_case ).numpy()
class SCREAMING_SNAKE_CASE ( UpperCAmelCase_ ):
"""simple docstring"""
def lowerCamelCase(self , lowerCAmelCase_ , lowerCAmelCase_=None , **lowerCAmelCase_ ):
return super().call(_snake_case , **_snake_case )
A_ : Tuple = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
A_ : Union[str, Any] = bart_model.generate(_snake_case , foo="""bar""" ).numpy()
self.assertTrue(np.array_equal(_snake_case , _snake_case ) )
class SCREAMING_SNAKE_CASE ( bart_model.model.encoder.__class__ ):
"""simple docstring"""
def lowerCamelCase(self , lowerCAmelCase_ , **lowerCAmelCase_ ):
return super().call(_snake_case , **_snake_case )
A_ : Union[str, Any] = FakeEncoder(bart_model.config , bart_model.model.shared )
A_ : Optional[Any] = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
A_ : Union[str, Any] = bart_model.generate(_snake_case ).numpy()
with self.assertRaises(_snake_case ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(_snake_case , foo="""bar""" )
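
# These tests are collected by pytest; an indicative invocation (the file path is
# an assumption about the repository layout):
#     python -m pytest tests/generation/test_tf_utils.py -k "tf_function_export"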
| 708 |
"""simple docstring"""
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ ):
A_ : str = OmegaConf.load(snake_case__ )
A_ : List[str] = torch.load(snake_case__ , map_location="""cpu""" )["""model"""]
A_ : Any = list(state_dict.keys() )
# extract state_dict for VQVAE
A_ : Any = {}
A_ : Union[str, Any] = """first_stage_model."""
for key in keys:
if key.startswith(snake_case__ ):
A_ : List[Any] = state_dict[key]
# extract state_dict for UNetLDM
A_ : Tuple = {}
A_ : Dict = """model.diffusion_model."""
for key in keys:
if key.startswith(snake_case__ ):
A_ : Optional[Any] = state_dict[key]
A_ : List[Any] = config.model.params.first_stage_config.params
A_ : Tuple = config.model.params.unet_config.params
A_ : Any = VQModel(**snake_case__ ).eval()
vqvae.load_state_dict(snake_case__ )
A_ : Optional[int] = UNetLDMModel(**snake_case__ ).eval()
unet.load_state_dict(snake_case__ )
A_ : List[str] = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule="""scaled_linear""" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=snake_case__ , )
A_ : str = LDMPipeline(snake_case__ , snake_case__ , snake_case__ )
pipeline.save_pretrained(snake_case__ )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", type=str, required=True)
parser.add_argument("--config_path", type=str, required=True)
parser.add_argument("--output_path", type=str, required=True)
_lowerCAmelCase = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
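    # Example invocation (the script name and all paths are placeholders):
    #   python conversion_ldm_uncond.py --checkpoint_path model.ckpt --config_path config.yaml --output_path ./ldm-out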
| 480 | 0 |
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """A matrix is Hermitian iff it equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient R(A, v) = (v* A v) / (v* v) for a Hermitian matrix A."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
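# For a Hermitian matrix A, the Rayleigh quotient is real and bounded by the
# extreme eigenvalues: lambda_min <= R(A, v) <= lambda_max for every nonzero v.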
| 101 |
class __a :
"""simple docstring"""
def __init__( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =0
SCREAMING_SNAKE_CASE__ =0
SCREAMING_SNAKE_CASE__ ={}
def __A ( self : Tuple ,_UpperCamelCase : int ) -> Optional[int]:
'''simple docstring'''
if vertex not in self.adjacency:
SCREAMING_SNAKE_CASE__ ={}
self.num_vertices += 1
def __A ( self : str ,_UpperCamelCase : str ,_UpperCamelCase : Tuple ,_UpperCamelCase : Any ) -> Any:
'''simple docstring'''
self.add_vertex(_UpperCamelCase )
self.add_vertex(_UpperCamelCase )
if head == tail:
return
SCREAMING_SNAKE_CASE__ =weight
SCREAMING_SNAKE_CASE__ =weight
def __A ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =self.get_edges()
for edge in edges:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =edge
edges.remove((tail, head, weight) )
for i in range(len(_UpperCamelCase ) ):
SCREAMING_SNAKE_CASE__ =list(edges[i] )
edges.sort(key=lambda _UpperCamelCase : e[2] )
for i in range(len(_UpperCamelCase ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
SCREAMING_SNAKE_CASE__ =edges[i][2] + 1
for edge in edges:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =edge
SCREAMING_SNAKE_CASE__ =weight
SCREAMING_SNAKE_CASE__ =weight
def __str__( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =""""""
for tail in self.adjacency:
for head in self.adjacency[tail]:
SCREAMING_SNAKE_CASE__ =self.adjacency[head][tail]
string += f"""{head} -> {tail} == {weight}\n"""
return string.rstrip("""\n""" )
def __A ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =[]
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __A ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
return self.adjacency.keys()
@staticmethod
def __A ( _UpperCamelCase : Union[str, Any]=None ,_UpperCamelCase : List[Any]=None ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =Graph()
if vertices is None:
SCREAMING_SNAKE_CASE__ =[]
if edges is None:
SCREAMING_SNAKE_CASE__ =[]
for vertex in vertices:
g.add_vertex(_UpperCamelCase )
for edge in edges:
g.add_edge(*_UpperCamelCase )
return g
class UnionFind:
    """Disjoint-set structure with path compression and union by rank."""

    def __init__(self) -> None:
        self.parent = {}
        self.rank = {}

    def __len__(self) -> int:
        return len(self.parent)

    def make_set(self, item):
        """Create a singleton set containing item and return it."""
        if item in self.parent:
            return self.find(item)
        self.parent[item] = item
        self.rank[item] = 0
        return item

    def find(self, item):
        """Return the representative of item's set, compressing the path."""
        if item not in self.parent:
            return self.make_set(item)
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item])
        return self.parent[item]
    def union(self, item_a, item_b):
        """Merge the sets containing item_a and item_b and return the new root."""
        root_a = self.find(item_a)
        root_b = self.find(item_b)
        if root_a == root_b:
            return root_a
        if self.rank[root_a] > self.rank[root_b]:
            self.parent[root_b] = root_a
            return root_a
        if self.rank[root_a] < self.rank[root_b]:
            self.parent[root_a] = root_b
            return root_b
        if self.rank[root_a] == self.rank[root_b]:
            self.rank[root_a] += 1
            self.parent[root_b] = root_a
            return root_a
        return None
    @staticmethod
    def boruvka(graph):
        """Compute a minimum spanning tree of graph with Boruvka's algorithm."""
        num_components = graph.num_vertices
        union_find = UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set_a = union_find.find(head)
                set_b = union_find.find(tail)
                if set_a != set_b:
                    if cheap_edge[set_a] == -1 or cheap_edge[set_a][2] > weight:
                        cheap_edge[set_a] = [head, tail, weight]
                    if cheap_edge[set_b] == -1 or cheap_edge[set_b][2] > weight:
                        cheap_edge[set_b] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
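

# --- Added example (hedged; not part of the original module). A tiny
# end-to-end run of the classes above on a triangle whose unique MST is
# {A-B, B-C}; note that get_edges() reports each undirected edge twice.
def _boruvka_demo() -> None:
    g = Graph.build(edges=[("A", "B", 1), ("B", "C", 2), ("A", "C", 3)])
    mst = UnionFind.boruvka(g)
    assert sorted(edge[2] for edge in mst.get_edges()) == [1, 1, 2, 2]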
| 151 | 0 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    """Return x unchanged if it is iterable, otherwise the pair (x, x)."""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = FlaxVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()
        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()
        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)
        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)
        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)
        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)
        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()
        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)
        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state
        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)
        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)
    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")
        inputs_dict = config_inputs_dict
        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs)
        out_2 = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
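

# --- Added example (hedged; not part of the original test file). Match
# probabilities can be derived from the verified logits with a plain numpy
# softmax over the text axis; the helper name is illustrative.
def _softmax_over_text(logits_per_image: np.ndarray) -> np.ndarray:
    exp = np.exp(logits_per_image - logits_per_image.max(axis=1, keepdims=True))
    return exp / exp.sum(axis=1, keepdims=True)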
| 569 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
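

# --- Added example (hedged; not part of the original modeling file): with the
# default stride of 1 and padding = kernel_size // 2 above, ResNetConvLayer
# preserves spatial resolution while changing the channel count.
def _conv_layer_shape_demo() -> None:
    layer = ResNetConvLayer(3, 64, kernel_size=3, stride=1)
    out = layer(torch.randn(2, 3, 32, 32))
    assert out.shape == (2, 64, 32, 32)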
class ResNetEmbeddings(nn.Module):
    """ResNet embeddings (stem): an aggressive convolution followed by max pooling."""

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    """1x1 convolution used to project the residual to the right size/stride."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    """A classic ResNet residual layer, composed of two 3x3 convolutions."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    """A ResNet bottleneck layer: channels are reduced by `reduction`, convolved, then expanded."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    """A ResNet stage composed of stacked layers."""

    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
RESNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    "\n    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ",
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    "\n    ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n    ",
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
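

# --- Added usage sketch (hedged; not part of the original modeling file).
# Typical classification inference with the head defined above; the checkpoint
# name mirrors the docstring constants in this file, and `image` is assumed to
# be a PIL image supplied by the caller.
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#   inputs = processor(images=image, return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits
#   predicted_label = model.config.id2label[logits.argmax(-1).item()]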
| 569 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """
    Calculate the resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C)).

    >>> resonant_frequency(inductance=10, capacitance=5)
    ('Resonant frequency', 0.022507907903927652)
    >>> resonant_frequency(inductance=0, capacitance=5)
    Traceback (most recent call last):
        ...
    ValueError: Inductance cannot be 0 or negative
    >>> resonant_frequency(inductance=10, capacitance=0)
    Traceback (most recent call last):
        ...
    ValueError: Capacitance cannot be 0 or negative
    """
    if inductance <= 0:
        raise ValueError('Inductance cannot be 0 or negative')
    elif capacitance <= 0:
        raise ValueError('Capacitance cannot be 0 or negative')
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
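

# --- Added example (hedged; not part of the original module): for L = 1 H and
# C = 1 F the formula reduces to f = 1 / (2 * pi) Hz.
def _resonant_frequency_demo() -> None:
    label, freq = resonant_frequency(inductance=1, capacitance=1)
    assert label == "Resonant frequency"
    assert abs(freq - 1 / (2 * pi)) < 1e-12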
if __name__ == "__main__":
import doctest
doctest.testmod()
| 72 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Return True if the given number is prime."""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    """Return the first n odd composites that cannot be written as prime + 2*i*i."""
    if not isinstance(n, int):
        raise ValueError('n must be an integer')
    if n <= 0:
        raise ValueError('n must be >= 0')
    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
else:
list_nums.append(odd_composites[num] )
        if len(list_nums) == n:
return list_nums
return []
def solution() -> int:
    """Return the smallest odd composite with no decomposition prime + 2*i*i."""
return compute_nums(1 )[0]
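

# --- Added note (hedged; not part of the original file): this implements
# Project Euler problem 46 (Goldbach's other conjecture). The published
# answer is 5777, so the check below should hold once the search terminates.
def _solution_demo() -> None:
    assert solution() == 5777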
if __name__ == "__main__":
print(f'''{solution() = }''')
| 56 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count=1):
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    def test_glue(self):
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' , "w" ) as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
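

# --- Added note (hedged; not part of the original test file): parameterized_class
# injects each configuration dict above as class attributes, so for the first
# entry self.framework == "pytorch" and self.results["eval_accuracy"] == 0.6.
# The whole suite only runs when the environment variable TEST_SAGEMAKER is "True".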
| 371 |
encode_dict = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    """Encode a message of letters and spaces with the Baconian cipher."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded
def decode(coded: str) -> str:
    """Decode a Baconian-cipher message made of 'A', 'B' and spaces."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
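

# --- Added example (hedged; not part of the original module): the cipher
# round-trips any lowercase message made of letters and spaces.
def _baconian_roundtrip_demo() -> None:
    message = "hello world"
    assert decode(encode(message)) == message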
| 371 | 1 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Rename `original_name` inside `key`, shifting the block index by `offset`."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
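

# --- Added example (hedged; not part of the original script): a hypothetical
# checkpoint key renamed with a zero offset.
def _rename_demo() -> None:
    renamed = replace_key_with_offset("network.1.0.mlp.fc1.weight", 0, "mlp.fc1", "output.conv1")
    assert renamed == "network.block.1.0.output.conv1.weight"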
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """Prepare a COCO image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info(f"Converting model {model_name}...")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values
    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)
    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 379 |
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")

DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)
        simple_backend = find_backend('    if not is_tokenizers_available():')
        self.assertEqual(simple_backend, "tokenizers")
        backend_with_underscore = find_backend('    if not is_tensorflow_text_available():')
        self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend('    if not (is_sentencepiece_available() and is_tokenizers_available()):')
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")
        double_backend_with_underscore = find_backend(
            '    if not (is_sentencepiece_available() and is_tensorflow_text_available()):'
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            '    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):'
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")
        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )
        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 379 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 222 |
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
END_COMMON = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def rename_state_dict_key(k: str, patterns) -> str:
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    config = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(config)
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder')}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder')}
    for k, v in tqdm(decoder_weights.items(), 'tf -> hf conversion'):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})')
        if any(True if i in k else False for i in ['dense', 'query', 'key', 'value']):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
    for k, v in tqdm(remaining_weights.items(), 'tf -> hf conversion'):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})')
        if any(True if i in k else False for i in ['dense', 'query', 'key', 'value']):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
    mapping['model.encoder.embed_positions.weight'] = mapping['model.embed_positions.weight']
    mapping['model.decoder.embed_positions.weight'] = mapping.pop('model.embed_positions.weight')
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            'final_logits_bias',
            'model.encoder.embed_tokens.weight',
            'model.decoder.embed_tokens.weight',
            'lm_head.weight',
        ]
    ]
    assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
    assert extra == [], f'no matches found for the following tf keys {extra}'
    return torch_model
def get_tf_weights_as_numpy(path: str) -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
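# Minimal usage sketch (hypothetical checkpoint directory): get_tf_weights_as_numpy("./bigbird-ckpt")
# returns a plain {tf_variable_name: numpy_array} dict, skipping bookkeeping variables such as
# "global_step" so that only model weights reach the conversion loop above.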
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict) -> None:
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 222 | 1 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()
        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)
        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 61 |
"""simple docstring"""
def climb_stairs(number_of_steps: int) -> int:
    """
    Count the distinct ways to climb `number_of_steps` stairs taking 1 or 2 steps
    at a time (the answer follows the Fibonacci recurrence).

    >>> climb_stairs(3)
    3
    >>> climb_stairs(1)
    1
    """
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
| 616 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 701 |
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution(taken: int = 20) -> str:
    """Project Euler 493: expected number of distinct colours among `taken` balls
    drawn from 70 balls, 10 of each of 7 colours."""
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"
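# Worked check (linearity of expectation): a given colour is missing only when all 20
# picked balls come from the other 60 balls, so E = 7 * (1 - C(60, 20) / C(70, 20)),
# which evaluates to approximately 6.818741802.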
if __name__ == "__main__":
print(solution(20))
| 403 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_rembert""": ["""REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RemBertConfig""", """RemBertOnnxConfig"""]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
"""REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RemBertForCausalLM""",
"""RemBertForMaskedLM""",
"""RemBertForMultipleChoice""",
"""RemBertForQuestionAnswering""",
"""RemBertForSequenceClassification""",
"""RemBertForTokenClassification""",
"""RemBertLayer""",
"""RemBertModel""",
"""RemBertPreTrainedModel""",
"""load_tf_weights_in_rembert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"""TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRemBertForCausalLM""",
"""TFRemBertForMaskedLM""",
"""TFRemBertForMultipleChoice""",
"""TFRemBertForQuestionAnswering""",
"""TFRemBertForSequenceClassification""",
"""TFRemBertForTokenClassification""",
"""TFRemBertLayer""",
"""TFRemBertModel""",
"""TFRemBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 80 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
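# Illustrative sketch: a bare frame (PIL image or ndarray) becomes a one-video batch
# [[frame]], a flat list of frames becomes [frames], and a list of videos passes through
# unchanged, so downstream code can always iterate `for video in videos: for frame in video`.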
class VivitImageProcessor(BaseImageProcessor):
    r"""Constructs a Vivit-style video image processor."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, offset: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], offset: bool = True, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        # Rescale by `scale`; with `offset`, shift the result down by 1 so that a factor of
        # 1/127.5 maps pixel values into [-1, 1] instead of [0, 2].
        rescaled_image = rescale(image, scale=scale, data_format=data_format, **kwargs)
        if offset:
            rescaled_image = rescaled_image - 1
        return rescaled_image

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format)
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
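# Minimal usage sketch (shapes are illustrative and assume a list of equally sized frames):
#   processor = VivitImageProcessor()
#   batch = processor(frames, return_tensors="np")
#   batch["pixel_values"]  # roughly (num_videos, num_frames, 3, 224, 224) after crop/normalise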
| 51 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)

    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-2

    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs)
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.2 GB is allocated
        assert mem_bytes < 5.5 * 10**9
| 606 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find("span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
    # Blank out cells whose values could not be scraped, then shift to 1-based indexing.
    data_frame.loc[
        data_frame["Current Price of the product"] == "", "Current Price of the product"
    ] = " "
    data_frame.loc[data_frame["Product Rating"] == "", "Product Rating"] = " "
    data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    product = "headphones"
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
| 606 | 1 |
'''simple docstring'''
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
| 135 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/led-base-16384': 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings, remapping whitespace and
    control bytes that byte-level BPE cannot handle directly.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
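# Illustrative: byte 32 (" ") is not in the printable ranges above, so it maps to the
# shifted codepoint chr(256 + 32), i.e. bytes_to_unicode()[32] == "Ġ", which is why
# GPT-2/LED vocabularies spell a leading space as "Ġ".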
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
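# e.g. get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")},
# the candidate merges that `bpe` below ranks against the learned merge table.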
class LEDTokenizer(PreTrainedTokenizer):
    """Constructs a LED tokenizer, which uses byte-level Byte-Pair-Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
| 135 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, eta: float = 0.0, num_inference_steps: int = 50, use_clipped_model_output: Optional[bool] = None, output_type: Optional[str] = "pil", return_dict: bool = True):
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator)}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
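# Minimal usage sketch (a public DDPM checkpoint that works with the DDIM scheduler):
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   images = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images
# eta=0.0 makes sampling deterministic given the generator; eta=1.0 recovers DDPM-like noise.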
| 704 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, eta: float = 0.0, num_inference_steps: int = 50, use_clipped_model_output: Optional[bool] = None, output_type: Optional[str] = "pil", return_dict: bool = True):
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator)}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 328 | 0 |
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"--original_config_file",
default=None,
type=str,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--scheduler_type",
default="pndm",
type=str,
help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
)
parser.add_argument(
"--pipeline_type",
default=None,
type=str,
help=(
"The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
". If `None` pipeline will be automatically inferred."
),
)
parser.add_argument(
"--image_size",
default=None,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--prediction_type",
default=None,
type=str,
help=(
"The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
parser.add_argument(
"--stable_unclip",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
)
parser.add_argument(
"--stable_unclip_prior",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
)
parser.add_argument(
"--clip_stats_path",
type=str,
help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
required=False,
)
parser.add_argument(
"--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
)
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--vae_path",
type=str,
default=None,
required=False,
help="Set to a path, hub id to an already converted vae to not convert it again.",
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 400 |
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_one_file(self):
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)
            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)
        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1_000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")

    def test_legacy_load_from_url(self):
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")
@is_staging_test
class _snake_case ( unittest.TestCase ):
lowerCAmelCase_ : Optional[int] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def lowerCAmelCase__ ( cls ) -> List[str]:
'''simple docstring'''
snake_case_ = TOKEN
HfFolder.save_token(a__ )
@classmethod
def lowerCAmelCase__ ( cls ) -> Any:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))

            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelJapaneseTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"

        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]

        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
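
# A hedged, standalone sketch of the invariant exercised by
# `create_and_check_decoder_model_past_large_inputs` above: for a causal LM, the logits at
# the last position must be (numerically) the same whether the full sequence is re-run or
# the cached key/values from the prefix are reused. `model` is any HF causal LM instance;
# this helper is illustrative and not part of the original test file.
def _kv_cache_is_consistent(model, input_ids):
    import torch

    full_logits = model(input_ids).logits[:, -1]
    prefix = model(input_ids[:, :-1], use_cache=True)
    step_logits = model(input_ids[:, -1:], past_key_values=prefix.past_key_values).logits[:, -1]
    return torch.allclose(full_logits, step_logits, atol=1e-3)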
from __future__ import annotations

import random
import sys

import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap

usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Run the rules of the game through all points and return the next generation."""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2])

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
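
# Quick headless sanity check of the rules implemented above (no matplotlib needed):
# a three-cell "blinker" is a period-2 oscillator, so running two steps returns the
# starting state. This demo helper is an addition, not part of the original script.
def _demo_blinker():
    grid = [[False] * 5 for _ in range(5)]
    for col in (1, 2, 3):
        grid[2][col] = True  # horizontal bar in the middle row
    assert run(run(grid)) == grid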
import warnings

from ...utils import logging
from .image_processing_deit import DeiTImageProcessor


logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
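
# Migration sketch for the deprecation above: the image processor is a drop-in replacement
# for the feature extractor. The checkpoint name below is illustrative, not prescribed by
# this module:
#
#     from transformers import DeiTImageProcessor
#
#     processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
#     inputs = processor(images=image, return_tensors="pt")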
from pathlib import Path
from typing import List

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image


authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs
def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
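
# Minimal round trip through the helpers above, restricted to the text modality so that
# neither torch nor PIL is required. This check is an added illustration, not part of the
# original test module:
def _sanity_check_text_helpers():
    assert create_inputs(["text"]) == ["Text input"]
    assert output_types(["Text input"]) == ["text"]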
def equation(x: float) -> float:
    """
    >>> equation(5)
    -15
    >>> equation(0)
    10
    """
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    """
    Find a root of ``equation`` inside the bracket [a, b] by repeated halving.

    >>> bisection(-2, 5)
    3.1611328125
    >>> bisection(0, 6)
    3.158203125
    """
    # By the intermediate value theorem, the bracket must contain a sign change.
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(bisection(-2, 5))
    print(bisection(0, 6))
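    # Worked example: f(x) = 10 - x*x has a positive root at sqrt(10) ≈ 3.1623, and each
    # iteration halves the bracket, so [0, 6] needs about log2(6 / 0.01) ≈ 10 steps.
    import math

    assert abs(bisection(0, 6) - math.sqrt(10)) < 0.02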
import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu


filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
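
# `calculate_bleu` is imported from the repo-local examples `utils` module. As a hedged
# sketch only — an assumption about that helper, not a verbatim copy — the conventional
# implementation is corpus BLEU via sacrebleu over decoded vs. reference lines:
def _calculate_bleu_sketch(output_lns, refs_lns):
    from sacrebleu import corpus_bleu

    return {"bleu": round(corpus_bleu(output_lns, [refs_lns]).score, 4)}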
def count_divisors(n: int) -> int:
    """Count divisors of n from its prime factorization: multiply (multiplicity + 1)."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    """Return the value of the first triangle number to have over five hundred divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i

        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
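    # Worked example for count_divisors: 28 = 2^2 * 7, so the divisor count is
    # (2 + 1) * (1 + 1) = 6 (namely 1, 2, 4, 7, 14, 28).
    assert count_divisors(28) == 6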
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.551791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
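
# Why the indirection above: replacing the module in sys.modules with a _LazyModule defers
# the heavy torch/sentencepiece/tokenizers imports until an attribute is actually accessed.
# A hedged, minimal illustration of the mechanism (not the actual _LazyModule code):
#
#     import importlib
#     import types
#
#     class TinyLazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#
#         def __getattr__(self, attr):
#             for submodule, names in self._import_structure.items():
#                 if attr in names:
#                     module = importlib.import_module(f".{submodule}", self.__name__)
#                     return getattr(module, attr)
#             raise AttributeError(attr)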
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """
    Replaces the key by subtracting the offset from the original layer number
    """
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    # We will verify our results on a COCO validation image of cats
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak model's weights to our PoolFormer structure.
    """
    # define default PoolFormer configuration
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
lowercase_ = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
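
# Example invocation (the script filename and checkpoint paths below are illustrative):
#
#     python convert_poolformer_checkpoint.py \
#         --model_name poolformer_s12 \
#         --checkpoint_path /path/to/poolformer_s12.pth.tar \
#         --pytorch_dump_folder_path ./poolformer_s12_hf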
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)


_CITATION = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_DESCRIPTION = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        all_metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=all_metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowercase__ : str = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def __lowercase ( _a , _a ):
inspect_dataset(_a , _a )
snake_case_ : Optional[int] = path + '''.py'''
assert script_name in os.listdir(_a )
assert "__pycache__" not in os.listdir(_a )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def __lowercase ( _a , _a ):
inspect_metric(_a , _a )
snake_case_ : List[Any] = path + '''.py'''
assert script_name in os.listdir(_a )
assert "__pycache__" not in os.listdir(_a )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def __lowercase ( _a , _a , _a ):
snake_case_ : List[str] = get_dataset_config_info(_a , config_name=_a )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def __lowercase ( _a , _a , _a ):
with pytest.raises(_a ):
get_dataset_config_info(_a , config_name=_a )
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def __lowercase ( _a , _a ):
snake_case_ : str = get_dataset_config_names(_a )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def __lowercase ( _a , _a , _a ):
snake_case_ : Any = get_dataset_infos(_a )
assert list(infos.keys() ) == expected_configs
snake_case_ : Tuple = expected_configs[0]
assert expected_config in infos
snake_case_ : List[Any] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def __lowercase ( _a , _a , _a ):
snake_case_ : Any = get_dataset_infos(_a )
assert expected_config in infos
snake_case_ : List[str] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def __lowercase ( _a , _a , _a ):
with pytest.raises(_a ):
get_dataset_split_names(_a , config_name=_a )
"""simple docstring"""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class SCREAMING_SNAKE_CASE_ ( pl.LightningModule ):
"""simple docstring"""
def __init__( self :str , __lowercase :argparse.Namespace , __lowercase :Optional[int]=None , __lowercase :str="base" , __lowercase :Optional[int]=None , __lowercase :List[str]=None , __lowercase :Tuple=None , **__lowercase :Optional[int] , ):
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(__lowercase )
__lowerCamelCase : List[Any] =0
__lowerCamelCase : List[str] =Path(self.hparams.output_dir )
__lowerCamelCase : Optional[int] =self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__lowerCamelCase : str =AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=__lowercase , **__lowercase , )
else:
__lowerCamelCase : PretrainedConfig =config
__lowerCamelCase : Union[str, Any] =('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(self.hparams , __lowercase , __lowercase ):
assert hasattr(self.config , __lowercase ), f'model config doesn\'t have a `{p}` attribute'
setattr(self.config , __lowercase , getattr(self.hparams , __lowercase ) )
if tokenizer is None:
__lowerCamelCase : List[str] =AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=__lowercase , )
else:
__lowerCamelCase : PreTrainedTokenizer =tokenizer
__lowerCamelCase : str =MODEL_MODES[mode]
if model is None:
__lowerCamelCase : List[Any] =self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=__lowercase , )
else:
__lowerCamelCase : Union[str, Any] =model
def __lowercase ( self :List[str] , *__lowercase :Optional[int] , **__lowercase :Optional[Any] ):
__lowerCamelCase : str =self.model_type.from_pretrained(*__lowercase , **__lowercase )
def __lowercase ( self :Union[str, Any] ):
__lowerCamelCase : List[Any] =arg_to_scheduler[self.hparams.lr_scheduler]
__lowerCamelCase : str =get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__lowerCamelCase : Union[str, Any] ={'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1}
return scheduler
def __lowercase ( self :Any ):
__lowerCamelCase : List[str] =self.model
__lowerCamelCase : int =['''bias''', '''LayerNorm.weight''']
__lowerCamelCase : Optional[Any] =[
{
'''params''': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ], # check these named parameters
'''weight_decay''': self.hparams.weight_decay,
},
{
'''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
if self.hparams.adafactor:
__lowerCamelCase : Optional[int] =Adafactor(
__lowercase , lr=self.hparams.learning_rate , scale_parameter=__lowercase , relative_step=__lowercase )
else:
__lowerCamelCase : int =AdamW(
__lowercase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__lowerCamelCase : int =optimizer
__lowerCamelCase : str =self.get_lr_scheduler()
return [optimizer], [scheduler]
def __lowercase ( self :List[Any] , __lowercase :Any , __lowercase :Optional[int] ):
return self.validation_step(__lowercase , __lowercase )
def __lowercase ( self :Optional[Any] , __lowercase :str ):
return self.validation_end(__lowercase )
def __lowercase ( self :Union[str, Any] ):
__lowerCamelCase : str =max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__lowerCamelCase : Optional[int] =self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def __lowercase ( self :Tuple , __lowercase :Dict ):
if stage == "test":
__lowerCamelCase : List[Any] =len(self.test_dataloader().dataset )
else:
__lowerCamelCase : Optional[int] =self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=__lowercase )
__lowerCamelCase : str =len(self.train_dataloader().dataset )
def __lowercase ( self :int , __lowercase :str , __lowercase :int , __lowercase :bool = False ):
raise NotImplementedError('''You must implement this for your task''' )
def __lowercase ( self :Tuple ):
return self.train_loader
def __lowercase ( self :Tuple ):
return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=__lowercase )
def __lowercase ( self :List[Any] ):
return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=__lowercase )
def __lowercase ( self :List[str] , __lowercase :Union[str, Any] ):
return os.path.join(
self.hparams.data_dir , '''cached_{}_{}_{}'''.format(
__lowercase , list(filter(__lowercase , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def __lowercase ( self :int , __lowercase :Dict[str, Any] ):
__lowerCamelCase : int =self.output_dir.joinpath('''best_tfmr''' )
__lowerCamelCase : List[str] =self.step_count
self.model.save_pretrained(__lowercase )
self.tokenizer.save_pretrained(__lowercase )
@staticmethod
def __lowercase ( __lowercase :int , __lowercase :Optional[int] ):
parser.add_argument(
'''--model_name_or_path''' , default=__lowercase , type=__lowercase , required=__lowercase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--config_name''' , default='''''' , type=__lowercase , help='''Pretrained config name or path if not the same as model_name''' )
parser.add_argument(
'''--tokenizer_name''' , default=__lowercase , type=__lowercase , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument(
'''--cache_dir''' , default=str(Path(__lowercase ).parent / '''test_run''' / '''cache''' ) , type=__lowercase , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , )
parser.add_argument(
'''--encoder_layerdrop''' , type=__lowercase , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--decoder_layerdrop''' , type=__lowercase , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--dropout''' , type=__lowercase , help='''Dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--attention_dropout''' , type=__lowercase , help='''Attention dropout probability (Optional). Goes into model.config''' , )
parser.add_argument('''--learning_rate''' , default=5e-5 , type=__lowercase , help='''The initial learning rate for Adam.''' )
parser.add_argument(
'''--lr_scheduler''' , default='''linear''' , choices=__lowercase , metavar=__lowercase , type=__lowercase , help='''Learning rate scheduler''' , )
parser.add_argument('''--weight_decay''' , default=0.0 , type=__lowercase , help='''Weight decay if we apply some.''' )
parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=__lowercase , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--warmup_steps''' , default=0 , type=__lowercase , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--num_workers''' , default=4 , type=__lowercase , help='''kwarg passed to DataLoader''' )
parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=__lowercase )
parser.add_argument('''--train_batch_size''' , default=32 , type=__lowercase )
parser.add_argument('''--eval_batch_size''' , default=32 , type=__lowercase )
parser.add_argument('''--adafactor''' , action='''store_true''' )
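# Lightning callbacks: RAG retriever initialisation, a gradient sanity check, and LR/metric logging.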
class SCREAMING_SNAKE_CASE_ ( pl.Callback ):
"""simple docstring"""
def __lowercase ( self :Dict , __lowercase :Union[str, Any] , __lowercase :Union[str, Any] ):
if (
trainer.is_global_zero and trainer.global_rank == 0
        ): # we initialize the retriever only on the master worker with Ray. In newer pytorch-lightning versions, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class SCREAMING_SNAKE_CASE_ ( pl.Callback ):
"""simple docstring"""
def __lowercase ( self :Any , __lowercase :int , __lowercase :List[Any] ):
# print(pl_module.model.rag)
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(__lowercase )
class SCREAMING_SNAKE_CASE_ ( pl.Callback ):
"""simple docstring"""
def __lowercase ( self :Any , __lowercase :Any , __lowercase :Optional[int] ):
__lowerCamelCase : Union[str, Any] =trainer.lr_schedulers[0]['''scheduler''']
__lowerCamelCase : List[str] ={f'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(__lowercase )
def __lowercase ( self :List[Any] , __lowercase :pl.Trainer , __lowercase :pl.LightningModule ):
rank_zero_info('''***** Validation results *****''' )
__lowerCamelCase : int =trainer.callback_metrics
# Log results
for key in sorted(__lowercase ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(__lowercase , str(metrics[key] ) ) )
def __lowercase ( self :Optional[int] , __lowercase :pl.Trainer , __lowercase :pl.LightningModule ):
rank_zero_info('''***** Test results *****''' )
__lowerCamelCase : Dict =trainer.callback_metrics
# Log and save results to file
__lowerCamelCase : str =os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' )
with open(__lowercase , '''w''' ) as writer:
for key in sorted(__lowercase ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(__lowercase , str(metrics[key] ) ) )
writer.write('''{} = {}\n'''.format(__lowercase , str(metrics[key] ) ) )
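# Registers the generic CLI arguments (output dir, fp16, TPU/GPU settings, seed, data dir) shared by all tasks.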
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
parser.add_argument(
'''--output_dir''' , default=str(Path(SCREAMING_SNAKE_CASE ).parent / '''test_run''' / '''model_checkpoints''' ) , type=SCREAMING_SNAKE_CASE , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=SCREAMING_SNAKE_CASE , default='''O2''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=SCREAMING_SNAKE_CASE )
parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=SCREAMING_SNAKE_CASE , help='''Max gradient norm''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' )
parser.add_argument(
'''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=SCREAMING_SNAKE_CASE , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--seed''' , type=SCREAMING_SNAKE_CASE , default=42 , help='''random seed for initialization''' )
parser.add_argument(
'''--data_dir''' , default=str(Path(SCREAMING_SNAKE_CASE ).parent / '''test_run''' / '''dummy-train-data''' ) , type=SCREAMING_SNAKE_CASE , help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''' , )
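# Builds a pl.Trainer from the parsed arguments (checkpointing, logging and optional early stopping) and runs fit().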
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : BaseTransformer , SCREAMING_SNAKE_CASE : argparse.Namespace , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : int=[] , SCREAMING_SNAKE_CASE : Tuple=None , SCREAMING_SNAKE_CASE : str=None , **SCREAMING_SNAKE_CASE : Union[str, Any] , ):
'''simple docstring'''
pl.seed_everything(args.seed )
# init model
__lowerCamelCase : str =Path(model.hparams.output_dir )
odir.mkdir(exist_ok=SCREAMING_SNAKE_CASE )
# add custom checkpoints
if checkpoint_callback is None:
__lowerCamelCase : Optional[Any] =pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(SCREAMING_SNAKE_CASE )
if logging_callback is None:
__lowerCamelCase : int =LoggingCallback()
__lowerCamelCase : Any ={}
    if args.fp16:
__lowerCamelCase : Any =16
if args.gpus > 1:
__lowerCamelCase : Any ='''auto'''
__lowerCamelCase : List[Any] ='''ddp'''
__lowerCamelCase : int =args.accumulate_grad_batches
__lowerCamelCase : List[Any] =None
__lowerCamelCase : int ='''auto'''
__lowerCamelCase : Optional[int] =pl.Trainer.from_argparse_args(
SCREAMING_SNAKE_CASE , weights_summary=SCREAMING_SNAKE_CASE , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=SCREAMING_SNAKE_CASE , val_check_interval=1 , num_sanity_val_steps=2 , **SCREAMING_SNAKE_CASE , )
if args.do_train:
trainer.fit(SCREAMING_SNAKE_CASE )
else:
        print('''RAG modeling tests with new set functions successfully executed!''' )
return trainer
| 363 |
"""simple docstring"""
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
_UpperCamelCase = False
try:
_UpperCamelCase = _is_package_available('google.colab')
except ModuleNotFoundError:
pass
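# Interactive terminal menu: prints the choices, highlights the current one, and moves the selection
# with arrow or number keys (falling back to plain numeric input on Colab).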
@input.register
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self :Union[str, Any] , __lowercase :str = None , __lowercase :list = [] ):
__lowerCamelCase : Any =0
__lowerCamelCase : List[str] =choices
__lowerCamelCase : int =prompt
if sys.platform == "win32":
__lowerCamelCase : Dict ='''*'''
else:
__lowerCamelCase : Union[str, Any] ='''➔ '''
def __lowercase ( self :Tuple , __lowercase :Any , __lowercase :str = "" ):
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , __lowercase )
else:
forceWrite(self.choices[index] , __lowercase )
def __lowercase ( self :Tuple , __lowercase :int ):
if index == self.position:
forceWrite(f' {self.arrow_char} ' )
self.write_choice(__lowercase )
else:
forceWrite(f' {self.choices[index]}' )
reset_cursor()
def __lowercase ( self :Tuple , __lowercase :Direction , __lowercase :int = 1 ):
__lowerCamelCase : List[str] =self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(__lowercase )
move_cursor(__lowercase , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP['''up'''] )
def __lowercase ( self :Union[str, Any] ):
self.move_direction(Direction.UP )
@input.mark(KEYMAP['''down'''] )
def __lowercase ( self :Union[str, Any] ):
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP['''newline'''] )
def __lowercase ( self :Any ):
move_cursor(len(self.choices ) - self.position , '''DOWN''' )
return self.position
@input.mark(KEYMAP['''interrupt'''] )
def __lowercase ( self :Any ):
move_cursor(len(self.choices ) - self.position , '''DOWN''' )
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number )] for number in range(10 )] )
def __lowercase ( self :Any ):
__lowerCamelCase : Tuple =int(chr(self.current_selection ) )
__lowerCamelCase : Dict =index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , __lowercase )
else:
return
else:
return
def __lowercase ( self :Optional[int] , __lowercase :int = 0 ):
if self.prompt:
linebreak()
forceWrite(self.prompt , '''\n''' )
if in_colab:
forceWrite('''Please input a choice index (starting from 0), and press enter''' , '''\n''' )
else:
            forceWrite('''Please select a choice using the arrow or number keys, and select with enter''' , '''\n''' )
__lowerCamelCase : Union[str, Any] =default_choice
for i in range(len(self.choices ) ):
self.print_choice(__lowercase )
forceWrite('''\n''' )
move_cursor(len(self.choices ) - self.position , '''UP''' )
with cursor.hide():
while True:
if in_colab:
try:
__lowerCamelCase : Union[str, Any] =int(builtins.input() )
except ValueError:
__lowerCamelCase : Optional[Any] =default_choice
else:
__lowerCamelCase : Dict =self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , '''UP''' )
clear_line()
self.write_choice(__lowercase , '''\n''' )
return choice
| 363 | 1 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
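# In-place quicksort with a randomly chosen pivot; returns the number of comparisons performed.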
def __A ( a_ :List[str] , a_ :Union[str, Any] , a_ :List[Any]) -> Optional[int]:
__a : List[Any] = 0
if start < end:
__a : List[Any] = randint(a_ , a_)
__a : Tuple = a[end]
__a : Any = a[pivot]
__a : Dict = temp
__a , __a : List[str] = _in_place_partition(a_ , a_ , a_)
count += _in_place_quick_sort(a_ , a_ , p - 1)
count += _in_place_quick_sort(a_ , p + 1 , a_)
return count
def __A ( a_ :Any , a_ :str , a_ :Dict) -> Tuple:
__a : str = 0
__a : int = randint(a_ , a_)
__a : Optional[int] = a[end]
__a : Any = a[pivot]
__a : int = temp
__a : Optional[Any] = start - 1
for index in range(a_ , a_):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
__a : List[str] = new_pivot_index + 1
__a : Dict = a[new_pivot_index]
__a : int = a[index]
__a : List[Any] = temp
__a : Optional[int] = a[new_pivot_index + 1]
__a : str = a[end]
__a : List[str] = temp
return new_pivot_index + 1, count
A = TemporaryFile()
A = 100 # 1000 elements are to be sorted
A , A = 0, 1 # mean and standard deviation
A = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0) # using the same array
A = np.load(outfile)
A = len(M) - 1
A = _in_place_quick_sort(M, 0, r)
print(
    '''No of Comparisons for 100 elements selected from a standard normal distribution '''
'''is :'''
)
print(z)
| 52 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : List[str] = logging.get_logger(__name__)
# TODO Update this
__snake_case : Union[str, Any] = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
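# Configuration classes for ESM and ESMFold: EsmConfig plus the nested EsmFoldConfig, TrunkConfig and StructureModuleConfig.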
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'esm'
def __init__( self : Tuple , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : int=7_68 , lowerCAmelCase_ : Optional[Any]=12 , lowerCAmelCase_ : int=12 , lowerCAmelCase_ : List[str]=30_72 , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : Dict=10_26 , lowerCAmelCase_ : int=0.02 , lowerCAmelCase_ : int=1e-12 , lowerCAmelCase_ : List[Any]="absolute" , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : str=False , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Union[str, Any]=None , **lowerCAmelCase_ : int , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase_ , mask_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
A__ : Any =vocab_size
A__ : Optional[Any] =hidden_size
A__ : Tuple =num_hidden_layers
A__ : List[str] =num_attention_heads
A__ : Tuple =intermediate_size
A__ : int =hidden_dropout_prob
A__ : str =attention_probs_dropout_prob
A__ : Tuple =max_position_embeddings
A__ : List[Any] =initializer_range
A__ : Optional[Any] =layer_norm_eps
A__ : Union[str, Any] =position_embedding_type
A__ : str =use_cache
A__ : Optional[int] =emb_layer_norm_before
A__ : Union[str, Any] =token_dropout
A__ : Tuple =is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
A__ : Optional[int] =EsmFoldConfig()
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
A__ : Dict =EsmFoldConfig(**lowerCAmelCase_ )
A__ : Union[str, Any] =esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
A__ : List[str] =get_default_vocab_list()
else:
A__ : List[str] =vocab_list
else:
A__ : Union[str, Any] =None
A__ : List[Any] =None
if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , lowerCAmelCase_ ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def lowercase__ ( self : Dict ) -> Any:
'''simple docstring'''
A__ : Dict =super().to_dict()
if isinstance(self.esmfold_config , lowerCAmelCase_ ):
A__ : Union[str, Any] =self.esmfold_config.to_dict()
return output
@dataclass
class lowerCamelCase :
'''simple docstring'''
__snake_case = None
__snake_case = True
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = 0
__snake_case = True
__snake_case = False
__snake_case = 128
__snake_case = None
def lowercase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
if self.trunk is None:
A__ : int =TrunkConfig()
elif isinstance(self.trunk , lowerCAmelCase_ ):
A__ : str =TrunkConfig(**self.trunk )
def lowercase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
A__ : List[Any] =asdict(self )
A__ : Tuple =self.trunk.to_dict()
return output
@dataclass
class lowerCamelCase :
'''simple docstring'''
__snake_case = 48
__snake_case = 1024
__snake_case = 128
__snake_case = 32
__snake_case = 32
__snake_case = 32
__snake_case = 0
__snake_case = 0
__snake_case = False
__snake_case = 4
__snake_case = 128
__snake_case = None
def lowercase__ ( self : Dict ) -> Any:
'''simple docstring'''
if self.structure_module is None:
A__ : Dict =StructureModuleConfig()
elif isinstance(self.structure_module , lowerCAmelCase_ ):
A__ : Union[str, Any] =StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}." )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                """`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
                f" {self.sequence_state_dim} and {self.sequence_head_width}." )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                """`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
A__ : List[str] =self.sequence_state_dim // self.sequence_head_width
A__ : Optional[int] =self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
if self.dropout >= 0.4:
raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}." )
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
A__ : int =asdict(self )
A__ : Optional[Any] =self.structure_module.to_dict()
return output
@dataclass
class lowerCamelCase :
'''simple docstring'''
__snake_case = 384
__snake_case = 128
__snake_case = 16
__snake_case = 128
__snake_case = 12
__snake_case = 4
__snake_case = 8
__snake_case = 0.1
__snake_case = 8
__snake_case = 1
__snake_case = 2
__snake_case = 7
__snake_case = 10
__snake_case = 1E-8
__snake_case = 1E5
def lowercase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
return asdict(self )
def __lowerCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 215 | 0 |
from collections import deque
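# Tarjan's algorithm: computes the strongly connected components of a directed graph in O(V + E).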
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : str = len(_a)
SCREAMING_SNAKE_CASE : int = deque()
SCREAMING_SNAKE_CASE : List[Any] = [False for _ in range(_a)]
SCREAMING_SNAKE_CASE : List[str] = [-1 for _ in range(_a)]
SCREAMING_SNAKE_CASE : str = index_of[:]
def strong_connect(_a , _a , _a):
SCREAMING_SNAKE_CASE : Optional[Any] = index # the number when this node is seen
SCREAMING_SNAKE_CASE : List[str] = index # lowest rank node reachable from here
index += 1
stack.append(_a)
SCREAMING_SNAKE_CASE : List[Any] = True
for w in g[v]:
if index_of[w] == -1:
SCREAMING_SNAKE_CASE : Union[str, Any] = strong_connect(_a , _a , _a)
SCREAMING_SNAKE_CASE : Any = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
SCREAMING_SNAKE_CASE : List[Any] = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Dict = stack.pop()
SCREAMING_SNAKE_CASE : str = False
component.append(_a)
while w != v:
SCREAMING_SNAKE_CASE : Dict = stack.pop()
SCREAMING_SNAKE_CASE : Tuple = False
component.append(_a)
components.append(_a)
return index
SCREAMING_SNAKE_CASE : Any = []
for v in range(_a):
if index_of[v] == -1:
strong_connect(_a , 0 , _a)
return components
def lowerCamelCase__ ( _a , _a):
SCREAMING_SNAKE_CASE : Tuple = [[] for _ in range(_a)]
for u, v in edges:
g[u].append(_a)
return g
if __name__ == "__main__":
# Test
a_ = 7
a_ = [0, 0, 1, 2, 3, 3, 4, 4, 6]
a_ = [1, 3, 2, 0, 1, 4, 5, 6, 5]
a_ = [(u, v) for u, v in zip(source, target)]
a_ = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 193 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
a_ = datasets.utils.logging.get_logger(__name__)
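# datasets builder that reads Parquet files in record batches and casts each batch to the declared features.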
@dataclass
class _UpperCamelCase ( datasets.BuilderConfig ):
'''simple docstring'''
lowerCamelCase__ =10000
lowerCamelCase__ =None
lowerCamelCase__ =None
class _UpperCamelCase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
lowerCamelCase__ =ParquetConfig
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def __UpperCamelCase ( self : Dict , a : List[Any] ) -> Tuple:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
SCREAMING_SNAKE_CASE : Tuple = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a , (str, list, tuple) ):
SCREAMING_SNAKE_CASE : Dict = data_files
if isinstance(a , a ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
SCREAMING_SNAKE_CASE : Optional[Any] = [dl_manager.iter_files(a ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
SCREAMING_SNAKE_CASE : str = []
for split_name, files in data_files.items():
if isinstance(a , a ):
SCREAMING_SNAKE_CASE : Optional[int] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
SCREAMING_SNAKE_CASE : Tuple = [dl_manager.iter_files(a ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(a ):
with open(a , "rb" ) as f:
SCREAMING_SNAKE_CASE : Dict = datasets.Features.from_arrow_schema(pq.read_schema(a ) )
break
splits.append(datasets.SplitGenerator(name=a , gen_kwargs={"files": files} ) )
return splits
def __UpperCamelCase ( self : Dict , a : pa.Table ) -> pa.Table:
"""simple docstring"""
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
SCREAMING_SNAKE_CASE : str = table_cast(a , self.info.features.arrow_schema )
return pa_table
def __UpperCamelCase ( self : List[str] , a : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'" )
for file_idx, file in enumerate(itertools.chain.from_iterable(a ) ):
with open(a , "rb" ) as f:
SCREAMING_SNAKE_CASE : Optional[int] = pq.ParquetFile(a )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
SCREAMING_SNAKE_CASE : int = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"{file_idx}_{batch_idx}", self._cast_table(a )
except ValueError as e:
logger.error(F"Failed to read file '{file}' with error {type(a )}: {e}" )
raise
| 193 | 1 |
'''simple docstring'''
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
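# Speech-recognition evaluation script: runs an ASR pipeline over a dataset and reports WER and CER.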
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
UpperCAmelCase__ : Dict = args.log_outputs
UpperCAmelCase__ : int = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
# load metric
UpperCAmelCase__ : int = load_metric('''wer''' )
UpperCAmelCase__ : Optional[int] = load_metric('''cer''' )
# compute metrics
UpperCAmelCase__ : int = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
UpperCAmelCase__ : Any = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
# print & log results
UpperCAmelCase__ : int = F"""WER: {wer_result}\nCER: {cer_result}"""
print(lowerCAmelCase__ )
with open(F"""{dataset_id}_eval_results.txt""" , '''w''' ) as f:
f.write(lowerCAmelCase__ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
UpperCAmelCase__ : Tuple = F"""log_{dataset_id}_predictions.txt"""
UpperCAmelCase__ : Dict = F"""log_{dataset_id}_targets.txt"""
with open(lowerCAmelCase__ , '''w''' ) as p, open(lowerCAmelCase__ , '''w''' ) as t:
# mapping function to write output
def write_to_file(lowerCAmelCase__ , lowerCAmelCase__ ):
p.write(F"""{i}""" + '''\n''' )
p.write(batch['''prediction'''] + '''\n''' )
t.write(F"""{i}""" + '''\n''' )
t.write(batch['''target'''] + '''\n''' )
result.map(lowerCAmelCase__ , with_indices=lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ ) -> str:
UpperCAmelCase__ : Optional[Any] = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
UpperCAmelCase__ : List[Any] = re.sub(lowerCAmelCase__ , '''''' , text.lower() )
    # In addition, we can normalize the target text, e.g. removing newline characters, etc.
# note that order is important here!
UpperCAmelCase__ : Any = ['''\n\n''', '''\n''', ''' ''', ''' ''']
for t in token_sequences_to_ignore:
UpperCAmelCase__ : Optional[Any] = ''' '''.join(text.split(lowerCAmelCase__ ) )
return text
def a__ ( lowerCAmelCase__ ) -> str:
# load dataset
UpperCAmelCase__ : Any = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowerCAmelCase__ )
    # for testing: only process the first few examples as a test
# dataset = dataset.select(range(10))
# load processor
UpperCAmelCase__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(args.model_id )
UpperCAmelCase__ : Any = feature_extractor.sampling_rate
# resample audio
UpperCAmelCase__ : List[str] = dataset.cast_column('''audio''' , Audio(sampling_rate=lowerCAmelCase__ ) )
# load eval pipeline
if args.device is None:
UpperCAmelCase__ : Any = 0 if torch.cuda.is_available() else -1
UpperCAmelCase__ : int = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(lowerCAmelCase__ ):
UpperCAmelCase__ : Optional[int] = asr(
batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
UpperCAmelCase__ : List[Any] = prediction['''text''']
UpperCAmelCase__ : Union[str, Any] = normalize_text(batch['''sentence'''] )
return batch
# run inference on all examples
UpperCAmelCase__ : Optional[int] = dataset.map(lowerCAmelCase__ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
UpperCamelCase__ = parser.parse_args()
main(args)
| 75 |
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
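# Helpers for the Accelerator tests below: a tiny linear model with its optimizer, scheduler and dataloaders.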
def snake_case_ ( ) -> str:
lowerCAmelCase_ = torch.nn.Linear(2 , 4)
lowerCAmelCase_ = torch.optim.AdamW(model.parameters() , lr=1.0)
lowerCAmelCase_ = torch.optim.lr_scheduler.OneCycleLR(__snake_case , max_lr=0.0_1 , steps_per_epoch=2 , epochs=1)
lowerCAmelCase_ = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
lowerCAmelCase_ = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
return model, optimizer, scheduler, train_dl, valid_dl
def snake_case_ ( __snake_case : Any) -> List[Any]:
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def snake_case_ ( __snake_case : Optional[int]) -> Any:
lowerCAmelCase_ = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
model.load_state_dict(__snake_case)
class __UpperCAmelCase ( __a ):
@require_cuda
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(_lowerCamelCase ):
lowerCAmelCase_ = Accelerator(cpu=_lowerCamelCase )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = Accelerator()
lowerCAmelCase_ = GradientState()
assert state.num_steps == 1
lowerCAmelCase_ = 4
assert state.num_steps == 4
assert state.sync_gradients is True
lowerCAmelCase_ = False
assert state.sync_gradients is False
GradientState._reset_state()
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = Accelerator()
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = create_components()
        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
) = accelerator.prepare(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = Accelerator()
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = create_components()
accelerator.prepare(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def UpperCAmelCase_ ( self ):
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*_lowerCamelCase , **_lowerCamelCase ):
pass
with patch('''torch.cuda.set_device''' , _lowerCamelCase ), patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ):
lowerCAmelCase_ = Accelerator()
self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = Accelerator()
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = create_components()
accelerator.prepare(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
lowerCAmelCase_ = get_signature(_lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(_lowerCamelCase )
# make sure random weights don't match
load_random_weights(_lowerCamelCase )
self.assertTrue(abs(model_signature - get_signature(_lowerCamelCase ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(_lowerCamelCase )
self.assertTrue(abs(model_signature - get_signature(_lowerCamelCase ) ) < 1E-3 )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = Accelerator()
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = create_components()
accelerator.prepare(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
lowerCAmelCase_ = get_signature(_lowerCamelCase )
# saving hook
def save_config(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCAmelCase_ = {'''class_name''': models[0].__class__.__name__}
with open(os.path.join(_lowerCamelCase , '''data.json''' ) , '''w''' ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase )
# loading hook
def load_config(_lowerCamelCase , _lowerCamelCase ):
with open(os.path.join(_lowerCamelCase , '''data.json''' ) , '''r''' ) as f:
lowerCAmelCase_ = json.load(_lowerCamelCase )
lowerCAmelCase_ = config['''class_name''']
lowerCAmelCase_ = accelerator.register_save_state_pre_hook(_lowerCamelCase )
lowerCAmelCase_ = accelerator.register_load_state_pre_hook(_lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(_lowerCamelCase )
# make sure random weights don't match with hooks
load_random_weights(_lowerCamelCase )
self.assertTrue(abs(model_signature - get_signature(_lowerCamelCase ) ) > 1E-3 )
# random class name to verify correct one is loaded
lowerCAmelCase_ = '''random'''
# make sure loaded weights match with hooks
accelerator.load_state(_lowerCamelCase )
self.assertTrue(abs(model_signature - get_signature(_lowerCamelCase ) ) < 1E-3 )
            # model.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(_lowerCamelCase )
# make sure random weights don't match with hooks removed
load_random_weights(_lowerCamelCase )
self.assertTrue(abs(model_signature - get_signature(_lowerCamelCase ) ) > 1E-3 )
# random class name to verify correct one is loaded
lowerCAmelCase_ = '''random'''
# make sure loaded weights match with hooks removed
accelerator.load_state(_lowerCamelCase )
self.assertTrue(abs(model_signature - get_signature(_lowerCamelCase ) ) < 1E-3 )
            # model.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = Accelerator()
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = create_components()
lowerCAmelCase_ = None
# This should work
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = accelerator.prepare(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
self.assertTrue(dummy_obj is None )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = Accelerator()
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = create_components()
lowerCAmelCase_ = [1, 2, 3]
# This should work
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = accelerator.prepare(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
self.assertEqual(
getattr(_lowerCamelCase , '''_is_accelerate_prepared''' , _lowerCamelCase ) , _lowerCamelCase , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , )
self.assertEqual(
getattr(_lowerCamelCase , '''_is_accelerate_prepared''' , _lowerCamelCase ) , _lowerCamelCase , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(_lowerCamelCase , '''_is_accelerate_prepared''' , _lowerCamelCase ) , _lowerCamelCase , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(_lowerCamelCase , '''_is_accelerate_prepared''' , _lowerCamelCase ) , _lowerCamelCase , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(_lowerCamelCase , '''_is_accelerate_prepared''' , _lowerCamelCase ) , _lowerCamelCase , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(_lowerCamelCase , '''_is_accelerate_prepared''' , _lowerCamelCase ) , _lowerCamelCase , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
@slow
@require_bnb
def UpperCAmelCase_ ( self ):
from transformers import AutoModelForCausalLM
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=_lowerCamelCase , device_map={'''''': 0} , )
lowerCAmelCase_ = Accelerator()
# This should work
lowerCAmelCase_ = accelerator.prepare(_lowerCamelCase )
@slow
@require_bnb
def UpperCAmelCase_ ( self ):
from transformers import AutoModelForCausalLM
lowerCAmelCase_ = Accelerator()
with init_empty_weights():
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
lowerCAmelCase_ = infer_auto_device_map(_lowerCamelCase )
lowerCAmelCase_ = '''cpu'''
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , device_map=_lowerCamelCase , load_in_abit=_lowerCamelCase , llm_inta_enable_fpaa_cpu_offload=_lowerCamelCase )
        # This should not work and should raise a ValueError
with self.assertRaises(_lowerCamelCase ):
lowerCAmelCase_ = accelerator.prepare(_lowerCamelCase )
@slow
@require_bnb
@require_multi_gpu
def UpperCAmelCase_ ( self ):
from transformers import AutoModelForCausalLM
lowerCAmelCase_ = {'''distributed_type''': DistributedType.MULTI_GPU}
with init_empty_weights():
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
lowerCAmelCase_ = infer_auto_device_map(_lowerCamelCase )
lowerCAmelCase_ = 1
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=_lowerCamelCase , device_map=_lowerCamelCase , )
lowerCAmelCase_ = Accelerator()
        # This should not work and should raise a ValueError
with self.assertRaises(_lowerCamelCase ):
lowerCAmelCase_ = accelerator.prepare(_lowerCamelCase )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def UpperCAmelCase_ ( self ):
from transformers import AutoModelForCausalLM
with init_empty_weights():
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
lowerCAmelCase_ = infer_auto_device_map(_lowerCamelCase )
lowerCAmelCase_ = 1
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=_lowerCamelCase , device_map=_lowerCamelCase , )
lowerCAmelCase_ = Accelerator()
# This should work
lowerCAmelCase_ = accelerator.prepare(_lowerCamelCase )
@require_cuda
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = torch.nn.Linear(10 , 10 )
lowerCAmelCase_ = torch.optim.SGD(model.parameters() , lr=0.01 )
lowerCAmelCase_ = Accelerator(cpu=_lowerCamelCase )
lowerCAmelCase_ = accelerator.prepare(_lowerCamelCase )
| 274 | 0 |
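# Converts an Excel-style column title (e.g. "AB") to its 1-based column number (base 26 with A = 1).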
def UpperCamelCase ( __lowercase : str ):
'''simple docstring'''
assert column_title.isupper()
A_ : Union[str, Any] = 0
A_ : Optional[int] = len(__lowercase ) - 1
A_ : Optional[int] = 0
while index >= 0:
A_ : Optional[Any] = (ord(column_title[index] ) - 64) * pow(26 ,__lowercase )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 70 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger()
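# Converts timm LeViT checkpoints to LevitForImageClassificationWithTeacher and checks that the logits match.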
def UpperCamelCase ( __lowercase : int ,__lowercase : str ,__lowercase : LevitConfig ,__lowercase : Path ,__lowercase : bool = True ):
'''simple docstring'''
print(f'''Converting {name}...''' )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
A_ : int = timm.create_model('levit_128s' ,pretrained=__lowercase )
else:
A_ : str = timm.create_model('levit_128' ,pretrained=__lowercase )
if hidden_sizes == 1_92:
A_ : List[str] = timm.create_model('levit_192' ,pretrained=__lowercase )
if hidden_sizes == 2_56:
A_ : Optional[Any] = timm.create_model('levit_256' ,pretrained=__lowercase )
if hidden_sizes == 3_84:
A_ : Tuple = timm.create_model('levit_384' ,pretrained=__lowercase )
from_model.eval()
A_ : Dict = LevitForImageClassificationWithTeacher(__lowercase ).eval()
A_ : Union[str, Any] = OrderedDict()
A_ : Dict = from_model.state_dict()
A_ : Tuple = list(from_model.state_dict().keys() )
A_ : str = list(our_model.state_dict().keys() )
print(len(__lowercase ) ,len(__lowercase ) )
for i in range(len(__lowercase ) ):
A_ : str = weights[og_keys[i]]
our_model.load_state_dict(__lowercase )
A_ : str = torch.randn((2, 3, 2_24, 2_24) )
A_ : str = from_model(__lowercase )
A_ : Optional[Any] = our_model(__lowercase ).logits
assert torch.allclose(__lowercase ,__lowercase ), "The model logits don't match the original one."
A_ : List[str] = name
print(__lowercase )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
A_ : Union[str, Any] = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f'''Pushed {checkpoint_name}''' )
def UpperCamelCase ( __lowercase : Path ,__lowercase : str = None ,__lowercase : bool = True ):
'''simple docstring'''
A_ : Dict = 'imagenet-1k-id2label.json'
A_ : Optional[int] = 10_00
A_ : Optional[int] = (1, num_labels)
A_ : int = 'huggingface/label-files'
A_ : int = num_labels
A_ : Union[str, Any] = json.load(open(hf_hub_download(__lowercase ,__lowercase ,repo_type='dataset' ) ,'r' ) )
A_ : int = {int(__lowercase ): v for k, v in idalabel.items()}
A_ : List[str] = idalabel
A_ : str = {v: k for k, v in idalabel.items()}
A_ : int = partial(__lowercase ,num_labels=__lowercase ,idalabel=__lowercase ,labelaid=__lowercase )
A_ : Any = {
'levit-128S': 1_28,
'levit-128': 1_28,
'levit-192': 1_92,
'levit-256': 2_56,
'levit-384': 3_84,
}
A_ : Tuple = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] ,num_attention_heads=[4, 6, 8] ,depths=[2, 3, 4] ,key_dim=[16, 16, 16] ,drop_path_rate=0 ,),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] ,num_attention_heads=[4, 8, 12] ,depths=[4, 4, 4] ,key_dim=[16, 16, 16] ,drop_path_rate=0 ,),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] ,num_attention_heads=[3, 5, 6] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0 ,),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] ,num_attention_heads=[4, 6, 8] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0 ,),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] ,num_attention_heads=[6, 9, 12] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0.1 ,),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] ,__lowercase ,names_to_config[model_name] ,__lowercase ,__lowercase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] ,__lowercase ,__lowercase ,__lowercase ,__lowercase )
return config, expected_shape
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""levit-dump-folder/""",
type=Path,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 70 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__A : Optional[Any] = logging.get_logger(__name__)
__A : Dict = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
__A : Tuple = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
__A : Dict = {
"gpt-neox-20b": 2_048,
}
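# Fast GPT-NeoX-20B tokenizer backed by the tokenizers library, with configurable prefix-space handling.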
class A_ (a_ ):
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = ['''input_ids''', '''attention_mask''']
def __init__( self , _A=None , _A=None , _A=None , _A="<|endoftext|>" , _A="<|endoftext|>" , _A="<|endoftext|>" , _A=False , **_A , ):
'''simple docstring'''
super().__init__(
_A , _A , tokenizer_file=_A , unk_token=_A , bos_token=_A , eos_token=_A , add_prefix_space=_A , **_A , )
UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _A ) != add_prefix_space:
UpperCAmelCase = getattr(_A , pre_tok_state.pop('''type''' ) )
UpperCAmelCase = add_prefix_space
UpperCAmelCase = pre_tok_class(**_A )
UpperCAmelCase = add_prefix_space
def _lowercase ( self , _A , _A = None ):
'''simple docstring'''
UpperCAmelCase = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
def _lowercase ( self , _A ):
'''simple docstring'''
UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_A , add_special_tokens=_A ) + [self.eos_token_id] )
if len(_A ) > self.model_max_length:
UpperCAmelCase = input_ids[-self.model_max_length :]
return input_ids
| 130 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
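# Pipeline tests for the Kandinsky 2.2 ControlNet pipeline, built from tiny dummy UNet and VQ models.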
class A_ (a_ , unittest.TestCase ):
UpperCAmelCase__ = KandinskyVaaControlnetPipeline
UpperCAmelCase__ = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
UpperCAmelCase__ = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
UpperCAmelCase__ = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
UpperCAmelCase__ = False
@property
def _lowercase ( self ):
'''simple docstring'''
return 3_2
@property
def _lowercase ( self ):
'''simple docstring'''
return 3_2
@property
def _lowercase ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def _lowercase ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _lowercase ( self ):
'''simple docstring'''
return 1_0_0
@property
def _lowercase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase = {
'''in_channels''': 8,
            # Out channels is double the in channels because the model predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
UpperCAmelCase = UNetaDConditionModel(**_A )
return model
@property
def _lowercase ( self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _lowercase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.dummy_unet
UpperCAmelCase = self.dummy_movq
UpperCAmelCase = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=_A , set_alpha_to_one=_A , steps_offset=1 , prediction_type='''epsilon''' , thresholding=_A , )
UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _lowercase ( self , _A , _A=0 ):
'''simple docstring'''
UpperCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_A )
# create hint
UpperCAmelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_A ) ).to(_A )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase = torch.manual_seed(_A )
else:
UpperCAmelCase = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = '''cpu'''
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**_A )
UpperCAmelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase = pipe(**self.get_dummy_inputs(_A ) )
UpperCAmelCase = output.images
UpperCAmelCase = pipe(
**self.get_dummy_inputs(_A ) , return_dict=_A , )[0]
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCAmelCase = np.array(
[0.6_95_98_26, 0.86_82_79, 0.7_55_80_92, 0.68_76_94_67, 0.85_80_58_04, 0.65_97_74_96, 0.44_88_53_02, 0.5_95_91_11, 0.4_25_15_95] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
def _lowercase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_controlnet( self ):
        '''simple docstring'''
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy''' )
        hint = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/hint_image_cat.png''' )
        hint = torch.from_numpy(np.array(hint ) ).float() / 2_55.0
        hint = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
        pipe_prior.to('''cuda''' )
        pipeline = KandinskyVaaControlnetPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
        pipeline = pipeline.to('''cuda''' )
        pipeline.set_progress_bar_config(disable=None )
        prompt = '''A robot, 4k photo'''
        generator = torch.Generator(device='''cuda''' ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
        generator = torch.Generator(device='''cuda''' ).manual_seed(0 )
        output = pipeline(
            image_embeds=image_emb , negative_image_embeds=zero_image_emb , hint=hint , generator=generator , num_inference_steps=1_0_0 , output_type='''np''' , )
        image = output.images[0]
        assert image.shape == (5_1_2, 5_1_2, 3)
        assert_mean_pixel_difference(image , expected_image )
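# `assert_mean_pixel_difference` above comes from diffusers' test utilities. A
# minimal sketch of the check it performs (the threshold of 10 is an assumption,
# not something stated in this file):
def _mean_pixel_difference_sketch(image, expected_image, expected_max_diff=10):
    import numpy as np

    avg_diff = np.abs(
        np.asarray(image, dtype=np.float32) - np.asarray(expected_image, dtype=np.float32)
    ).mean()
    assert avg_diff < expected_max_diff, f"images deviate by {avg_diff} pixels on average"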
| 130 | 1 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end) -> int:
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count
def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 1_00  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    "No of Comparisons for 100 elements selected from a standard normal distribution "
    "is :"
)
print(z)
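# Quick sanity check of the sort itself (a hedged sketch; `_in_place_quick_sort`
# sorts in place and returns only the comparison count, so we verify the order):
_demo = [3, 1, 2]
_in_place_quick_sort(_demo, 0, len(_demo) - 1)
assert _demo == [1, 2, 3]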
| 634 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig ):
    """simple docstring"""

    model_type = """esm"""
    def __init__( self , vocab_size=None , mask_token_id=None , pad_token_id=None , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_026 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , position_embedding_type="absolute" , use_cache=True , emb_layer_norm_before=None , token_dropout=False , is_folding_model=False , esmfold_config=None , vocab_list=None , **kwargs , ) -> int:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , mask_token_id=mask_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("""No esmfold_config supplied for folding model, using default values.""" )
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config , dict ):
                esmfold_config = EsmFoldConfig(**esmfold_config )
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , False ):
            raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
    def to_dict( self ) -> Tuple:
        """simple docstring"""
        output = super().to_dict()
        if isinstance(self.esmfold_config , EsmFoldConfig ):
            output["""esmfold_config"""] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    """simple docstring"""

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__( self ) -> Any:
        """simple docstring"""
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk , dict ):
            self.trunk = TrunkConfig(**self.trunk )

    def to_dict( self ) -> Any:
        """simple docstring"""
        output = asdict(self )
        output["""trunk"""] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    """simple docstring"""

    num_blocks: int = 48
    sequence_state_dim: int = 1_024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__( self ) -> str:
        """simple docstring"""
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module , dict ):
            self.structure_module = StructureModuleConfig(**self.structure_module )
        if self.max_recycles <= 0:
            raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}." )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                """`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
                F" {self.sequence_state_dim} and {self.sequence_head_width}." )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                """`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
                F" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                """`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
                F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                """`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
                F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
        if self.dropout >= 0.4:
            raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}." )

    def to_dict( self ) -> Union[str, Any]:
        """simple docstring"""
        output = asdict(self )
        output["""structure_module"""] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    """simple docstring"""

    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1E-8
    inf: float = 1E5

    def to_dict( self ) -> List[str]:
        """simple docstring"""
        return asdict(self )
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
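# A hedged usage sketch (the hyperparameter values below are arbitrary examples):
#
#     config = EsmConfig(vocab_size=33, is_folding_model=True, vocab_list=get_default_vocab_list())
#     config.esmfold_config.trunk.structure_module.sequence_dim  # -> 384 (the default above)
#     config.to_dict()  # nested dataclasses are serialized via their own to_dict()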
| 634 | 1 |
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution():
    """simple docstring"""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")
    words = ""
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip("\"") for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
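# Worked example of the word-value rule used above: "SKY" -> 19 + 11 + 25 = 55,
# and 55 = 10 * 11 / 2 is the 10th triangular number, so "SKY" is a triangle word:
# sum(ord(x) - 64 for x in "SKY") == 55 and 55 in TRIANGULAR_NUMBERS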
| 61 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def SCREAMING_SNAKE_CASE ( snake_case, snake_case, snake_case, snake_case, snake_case=True, snake_case="pt"):
__snake_case = {'''add_prefix_space''': True} if isinstance(snake_case, snake_case) and not line.startswith(''' ''') else {}
__snake_case = padding_side
return tokenizer(
[line], max_length=snake_case, padding='''max_length''' if pad_to_max_length else None, truncation=snake_case, return_tensors=snake_case, add_special_tokens=snake_case, **snake_case, )
def trim_batch(input_ids, pad_token_id, attention_mask=None, ):
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset ):
    """simple docstring"""

    def __init__( self , tokenizer , data_dir , max_source_length , max_target_length , type_path="train" , n_obs=None , src_lang=None , tgt_lang=None , prefix="" , ) -> str:
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + '''.source''' )
        self.tgt_file = Path(data_dir ).joinpath(type_path + '''.target''' )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__( self ) -> Union[str, Any]:
        return len(self.src_lens )

    def __getitem__( self , index ) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip('''\n''' )
        tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip('''\n''' )
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , TaTokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , '''right''' )
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , '''right''' )
        source_ids = source_inputs['''input_ids'''].squeeze()
        target_ids = target_inputs['''input_ids'''].squeeze()
        src_mask = source_inputs['''attention_mask'''].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens( data_file ) -> Any:
        return [len(x ) for x in Path(data_file ).open().readlines()]

    def collate_fn( self , batch ) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x['''input_ids'''] for x in batch] )
        masks = torch.stack([x['''attention_mask'''] for x in batch] )
        target_ids = torch.stack([x['''decoder_input_ids'''] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id )
        source_ids , source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
        batch = {
            '''input_ids''': source_ids,
            '''attention_mask''': source_mask,
            '''decoder_input_ids''': y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids ):
    return list(itertools.chain.from_iterable(summary_ids ))
def save_git_info(folder_path ) -> None:
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , '''git_log.json''' ))
def save_json(content , path , indent=4 , **json_dump_kwargs ):
    with open(path , '''w''' ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def load_json(path ):
    with open(path ) as f:
        return json.load(f )
def get_git_info():
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        '''repo_id''': str(repo ),
        '''repo_sha''': str(repo.head.object.hexsha ),
        '''repo_branch''': str(repo.active_branch ),
        '''hostname''': str(socket.gethostname() ),
    }
    return repo_infos
def lmap(f , x ):
    return list(map(f , x ))
def pickle_save(obj , path ):
    with open(path , '''wb''' ) as f:
        return pickle.dump(obj , f )
def normalize_answer(s ):
    def remove_articles(text ):
        return re.sub(R'''\b(a|an|the)\b''' , ''' ''' , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def f1_score(prediction , ground_truth ):
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def exact_match_score(prediction , ground_truth ):
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match(output_lns , reference_lns ):
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model(model_prefix ):
    return model_prefix.startswith('''rag''' )
def set_extra_model_params(extra_params , hparams , config ):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['''dropout'''] = '''dropout_rate'''
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info('''config doesn\'t have a `{}` attribute'''.format(p ))
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
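# Hedged usage sketch of the metric helpers above (pure Python, no model needed):
#
#     normalize_answer("The  Quick, Brown Fox!")      # -> "quick brown fox"
#     f1_score("quick brown fox", "the quick fox")    # token-overlap F1, here 0.8
#     calculate_exact_match(["a cat"], ["A cat."])    # -> {"em": 1.0}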
| 564 | 0 |
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")
_split_re = r"^\w+(\.\w+)*$"
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
def camelcase_to_snakecase(name ):
    name = _uppercase_uppercase_re.sub(r'''\1_\2''' , name )
    name = _lowercase_uppercase_re.sub(r'''\1_\2''' , name )
    return name.lower()
def snakecase_to_camelcase(name ):
    name = _single_underscore_re.split(name )
    name = [_multiple_underscores_re.split(n ) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name ) if n != '''''' )
def filename_prefix_for_name(name ):
    if os.path.basename(name ) != name:
        raise ValueError(f"""Should be a dataset name, not a path: {name}""" )
    return camelcase_to_snakecase(name )
def filename_prefix_for_split(name , split ):
    if os.path.basename(name ) != name:
        raise ValueError(f"""Should be a dataset name, not a path: {name}""" )
    if not re.match(_split_re , split ):
        raise ValueError(f"""Split name should match '{_split_re}'' but got '{split}'.""" )
    return f"""{filename_prefix_for_name(name )}-{split}"""
def filepattern_for_dataset_split(path , dataset_name , split , filetype_suffix=None ):
    prefix = filename_prefix_for_split(dataset_name , split )
    if filetype_suffix:
        prefix += f""".{filetype_suffix}"""
    filepath = os.path.join(path , prefix )
    return f"""{filepath}*"""
def filenames_for_dataset_split(path , dataset_name , split , filetype_suffix=None , shard_lengths=None ):
    prefix = filename_prefix_for_split(dataset_name , split )
    prefix = os.path.join(path , prefix )
    if shard_lengths:
        num_shards = len(shard_lengths )
        filenames = [f"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(num_shards )]
        if filetype_suffix:
            filenames = [filename + f""".{filetype_suffix}""" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f""".{filetype_suffix}"""
        return [filename]
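# Hedged usage sketch of the naming helpers above (paths are arbitrary examples):
#
#     camelcase_to_snakecase("CsvDataset")   # -> "csv_dataset"
#     snakecase_to_camelcase("csv_dataset")  # -> "CsvDataset"
#     filenames_for_dataset_split("/tmp", "squad", "train", "arrow", shard_lengths=[100, 50])
#     # -> ["/tmp/squad-train-00000-of-00002.arrow", "/tmp/squad-train-00001-of-00002.arrow"]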
| 711 |
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target = 2_0_0_0_0_0_0 ):
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )
    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int
    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        b_floor = floor(b_estimate )
        b_ceil = ceil(b_estimate )
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
if __name__ == "__main__":
print(F"{solution() = }")
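# Worked example: a 2-by-3 grid contains T(2) * T(3) = 3 * 6 = 18 sub-rectangles,
# so for a target of exactly 18 the best grid area is 2 * 3 = 6, i.e. solution(18) == 6.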
| 583 | 0 |
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None ) -> Tuple:
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser("""env""" )
    else:
        parser = argparse.ArgumentParser("""Accelerate env command""" )
    parser.add_argument(
        """--config_file""" , default=None , help="""The config file to use for the default values in the launching script.""" )
    if subparsers is not None:
        parser.set_defaults(func=env_command )
    return parser
def env_command(args ) -> str:
    '''simple docstring'''
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = """Not found"""
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        accelerate_config = load_config_from_file(args.config_file ).to_dict()
    info = {
        """`Accelerate` version""": version,
        """Platform""": platform.platform(),
        """Python version""": platform.python_version(),
        """Numpy version""": np.__version__,
        """PyTorch version (GPU?)""": f"""{pt_version} ({pt_cuda_available})""",
        """PyTorch XPU available""": str(pt_xpu_available ),
        """PyTorch NPU available""": str(pt_npu_available ),
        """System RAM""": f"""{psutil.virtual_memory().total / 1024 ** 3:.2f} GB""",
    }
    if pt_cuda_available:
        info["""GPU type"""] = torch.cuda.get_device_name()
    print("""\nCopy-and-paste the text below in your GitHub issue\n""" )
    print("""\n""".join([f"""- {prop}: {val}""" for prop, val in info.items()] ) )
    print("""- `Accelerate` default config:""" if args.config_file is None else """- `Accelerate` config passed:""" )
    accelerate_config_str = (
        """\n""".join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
        if isinstance(accelerate_config , dict )
        else f"""\t{accelerate_config}"""
    )
    print(accelerate_config_str )
    info["""`Accelerate` configs"""] = accelerate_config
    return info
def main() -> int:
    '''simple docstring'''
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args )
    return 0
if __name__ == "__main__":
raise SystemExit(main())
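# Hedged note: running this module (or `accelerate env`) prints one "- key: value"
# line per entry collected above, e.g. "- Platform: Linux-...", followed by the
# parsed config file when one is found; the exact values depend on the machine.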
| 36 |
'''simple docstring'''
from __future__ import annotations
def maximum_non_adjacent_sum(nums ) -> int:
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_excluding , max_including )
if __name__ == "__main__":
import doctest
doctest.testmod()
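# Worked examples of the recurrence above (it keeps two running values: the best
# sum including the current element and the best sum excluding it):
#
#     maximum_non_adjacent_sum([1, 2, 3])        # -> 4  (1 + 3)
#     maximum_non_adjacent_sum([1, 5, 3, 7, 2])  # -> 12 (5 + 7)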
| 649 | 0 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 108 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_rescale=True , rescale_factor=1 / 255 , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_pad=True , ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w )
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h )
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp( self ):
        self.image_processor_tester = DetrImageProcessingTester(self )

    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'image_mean' ) )
        self.assertTrue(hasattr(image_processing , 'image_std' ) )
        self.assertTrue(hasattr(image_processing , 'do_normalize' ) )
        self.assertTrue(hasattr(image_processing , 'do_rescale' ) )
        self.assertTrue(hasattr(image_processing , 'rescale_factor' ) )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
        self.assertTrue(hasattr(image_processing , 'do_pad' ) )

    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
        self.assertEqual(image_processor.do_pad , True )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
        self.assertEqual(image_processor.do_pad , False )
    def test_batch_feature( self ):
        pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    @slow
    def test_call_pytorch_with_coco_detection_annotations( self ):
        # prepare image and target
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
            target = json.loads(f.read() )
        target = {'image_id': 3_9769, 'annotations': target}
        # encode them
        image_processing = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
        encoding = image_processing(images=image , annotations=target , return_tensors='pt' )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding['pixel_values'].shape , expected_shape )
        expected_slice = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , expected_area ) )
        # verify boxes
        expected_shape = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , expected_shape )
        expected_slice = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , expected_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_9769] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , expected_class_labels ) )
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , expected_size ) )
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations( self ):
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
            target = json.loads(f.read() )
        target = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
        # encode them
        image_processing = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
        encoding = image_processing(images=image , annotations=target , masks_path=masks_path , return_tensors='pt' )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding['pixel_values'].shape , expected_shape )
        expected_slice = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , expected_area ) )
        # verify boxes
        expected_shape = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , expected_shape )
        expected_slice = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , expected_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_9769] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , expected_class_labels ) )
        # verify masks
        expected_masks_sum = 82_2873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item() , expected_masks_sum )
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , expected_size ) )
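# Hedged usage sketch outside the test harness (network access is assumed for
# `from_pretrained`, and the image path is an arbitrary example):
#
#     processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
#     encoding = processor(images=Image.open("cat.png"), return_tensors="pt")
#     encoding["pixel_values"].shape  # (1, 3, H, W) after resizing and normalization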
| 108 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.0_54_57_18_17e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force(force: float , area: float , distance: float) -> dict[str, float]:
    """simple docstring"""
    if (force, area, distance).count(0) != 1:
        raise ValueError('''One and only one argument must be 0''')
    if force < 0:
        raise ValueError('''Magnitude of force can not be negative''')
    if distance < 0:
        raise ValueError('''Distance can not be negative''')
    if area < 0:
        raise ValueError('''Area can not be negative''')
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError('''One and only one argument must be 0''')
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
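# Worked example: for two plates of area 4 m^2 separated by 0.03 m,
# F = hbar * c * pi^2 * A / (240 * d^4) is roughly 6.4e-21 N, i.e.
# casimir_force(force=0, area=4, distance=0.03) -> {"force": ~6.42e-21}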
| 11 |
"""simple docstring"""
class Node:
    def __init__( self , data , previous=None , next_node=None ):
        '''simple docstring'''
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__( self ):
        '''simple docstring'''
        return f"""{self.data}"""

    def get_data( self ):
        '''simple docstring'''
        return self.data

    def get_next( self ):
        '''simple docstring'''
        return self.next

    def get_previous( self ):
        '''simple docstring'''
        return self.previous
class LinkedListIterator:
    def __init__( self , head ):
        '''simple docstring'''
        self.current = head

    def __iter__( self ):
        '''simple docstring'''
        return self

    def __next__( self ):
        '''simple docstring'''
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class LinkedList:
    def __init__( self ):
        '''simple docstring'''
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__( self ):
        '''simple docstring'''
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data() )
            current = current.get_next()
        return " ".join(str(node ) for node in nodes )

    def __contains__( self , value ):
        '''simple docstring'''
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__( self ):
        '''simple docstring'''
        return LinkedListIterator(self.head )

    def get_head_data( self ):
        '''simple docstring'''
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data( self ):
        '''simple docstring'''
        if self.tail:
            return self.tail.get_data()
        return None
    def set_head( self , node ):
        '''simple docstring'''
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head , node )

    def set_tail( self , node ):
        '''simple docstring'''
        if self.head is None:
            self.set_head(node )
        else:
            self.insert_after_node(self.tail , node )

    def insert( self , value ):
        '''simple docstring'''
        node = Node(value )
        if self.head is None:
            self.set_head(node )
        else:
            self.set_tail(node )
    def insert_before_node( self , node , node_to_insert ):
        '''simple docstring'''
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node( self , node , node_to_insert ):
        '''simple docstring'''
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert
    def insert_at_position( self , position , value ):
        '''simple docstring'''
        current_position = 1
        new_node = Node(value )
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node , new_node )
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail , new_node )

    def get_node( self , item ):
        '''simple docstring'''
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found" )
    def remove_node( self , item ):
        '''simple docstring'''
        if (node := self.get_node(item )) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node )

    @staticmethod
    def remove_node_pointers( node ):
        '''simple docstring'''
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty( self ):
        '''simple docstring'''
        return self.head is None
def create_linked_list() -> None:
    '''simple docstring'''


if __name__ == "__main__":
    import doctest

    doctest.testmod()
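# Hedged usage sketch of the list above:
#
#     ll = LinkedList()
#     for v in (1, 2, 3):
#         ll.insert(v)
#     str(ll)             # -> "1 2 3"
#     ll.get_head_data()  # -> 1
#     ll.remove_node(2)
#     2 in ll             # -> False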
| 337 | 0 |
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = """T5Config"""
class TFMTaModel(TFTaModel ):
    """simple docstring"""

    model_type = """mt5"""
    config_class = MTaConfig


class TFMTaForConditionalGeneration(TFTaForConditionalGeneration ):
    """simple docstring"""

    model_type = """mt5"""
    config_class = MTaConfig


class TFMTaEncoderModel(TFTaEncoderModel ):
    """simple docstring"""

    model_type = """mt5"""
    config_class = MTaConfig
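# Hedged usage sketch (the checkpoint name is the standard mT5 release; weight
# download is assumed to be possible):
#
#     model = TFMTaForConditionalGeneration.from_pretrained("google/mt5-small")
#     model.config.model_type  # -> "mt5"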
| 446 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class FlaxCrossAttnDownBlockaD(nn.Module ):
    """simple docstring"""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.floataa

    def setup( self ):
        """simple docstring"""
        resnets = []
        attentions = []
        for i in range(self.num_layers ):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=in_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )

    def __call__( self , hidden_states , temb , encoder_hidden_states , deterministic=True ):
        """simple docstring"""
        output_states = ()
        for resnet, attn in zip(self.resnets , self.attentions ):
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states )
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxDownBlockaD(nn.Module ):
    """simple docstring"""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.floataa

    def setup( self ):
        """simple docstring"""
        resnets = []
        for i in range(self.num_layers ):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=in_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )

    def __call__( self , hidden_states , temb , deterministic=True ):
        """simple docstring"""
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states )
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxCrossAttnUpBlockaD(nn.Module ):
    """simple docstring"""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.floataa

    def setup( self ):
        """simple docstring"""
        resnets = []
        attentions = []
        for i in range(self.num_layers ):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )

    def __call__( self , hidden_states , res_hidden_states_tuple , temb , encoder_hidden_states , deterministic=True ):
        """simple docstring"""
        for resnet, attn in zip(self.resnets , self.attentions ):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states )
        return hidden_states
class FlaxUpBlockaD(nn.Module ):
    """simple docstring"""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.floataa

    def setup( self ):
        """simple docstring"""
        resnets = []
        for i in range(self.num_layers ):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )

    def __call__( self , hidden_states , res_hidden_states_tuple , temb , deterministic=True ):
        """simple docstring"""
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states )
        return hidden_states
class FlaxUNetMidBlockaDCrossAttn(nn.Module ):
    """simple docstring"""

    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.floataa

    def setup( self ):
        """simple docstring"""
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlockaD(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
        ]
        attentions = []
        for _ in range(self.num_layers ):
            attn_block = FlaxTransformeraDModel(
                in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
            res_block = FlaxResnetBlockaD(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        self.attentions = attentions

    def __call__( self , hidden_states , temb , encoder_hidden_states , deterministic=True ):
        """simple docstring"""
        hidden_states = self.resnets[0](hidden_states , temb )
        for attn, resnet in zip(self.attentions , self.resnets[1:] ):
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
        return hidden_states
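# Hedged initialization sketch (shapes are illustrative assumptions; these Flax
# blocks take NHWC feature maps and a timestep embedding with a trailing feature dim):
#
#     import jax
#     block = FlaxDownBlockaD(in_channels=32, out_channels=32)
#     params = block.init(
#         jax.random.PRNGKey(0),
#         jnp.zeros((1, 8, 8, 32)),  # hidden_states, NHWC
#         jnp.zeros((1, 32)),        # timestep embedding
#     )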
| 446 | 1 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            '''num_inference_steps''',
            '''num_waveforms_per_prompt''',
            '''generator''',
            '''latents''',
            '''output_type''',
            '''return_dict''',
            '''callback''',
            '''callback_steps''',
        ] )
    def get_dummy_components(self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=(32, 64) , class_embed_type='''simple_projection''' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=True , )
        scheduler = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = ClapTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
        text_encoder = ClapTextModelWithProjection(text_encoder_config )
        tokenizer = RobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-roberta''' , model_max_length=77 )
        vocoder_config = SpeechTaHifiGanConfig(
            model_in_dim=8 , sampling_rate=1_6000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=False , )
        vocoder = SpeechTaHifiGan(vocoder_config )
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''vocoder''': vocoder,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''A hammer hitting a wooden surface''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
        }
        return inputs
    def test_audioldm_ddim(self ):
        '''simple docstring'''
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components )
        audioldm_pipe = audioldm_pipe.to(device )
        audioldm_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = audioldm_pipe(**inputs )
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio ) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_033] )
        assert np.abs(audio_slice - expected_slice ).max() < 1e-2
    def test_audioldm_prompt_embeds(self ):
        '''simple docstring'''
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components )
        audioldm_pipe = audioldm_pipe.to(torch_device )
        audioldm_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        inputs['''prompt'''] = 3 * [inputs['''prompt''']]
        # forward
        output = audioldm_pipe(**inputs )
        audio_a = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device )
        prompt = 3 * [inputs.pop('''prompt''' )]
        text_inputs = audioldm_pipe.tokenizer(
            prompt , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=True , return_tensors='''pt''' , )
        text_inputs = text_inputs['''input_ids'''].to(torch_device )
        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs , )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds , dim=-1 )
        inputs['''prompt_embeds'''] = prompt_embeds
        # forward
        output = audioldm_pipe(**inputs )
        audio_b = output.audios[0]
        assert np.abs(audio_a - audio_b ).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self ):
        '''simple docstring'''
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components )
        audioldm_pipe = audioldm_pipe.to(torch_device )
        audioldm_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ['''this is a negative prompt''']
        inputs['''negative_prompt'''] = negative_prompt
        inputs['''prompt'''] = 3 * [inputs['''prompt''']]
        # forward
        output = audioldm_pipe(**inputs )
        audio_a = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device )
        prompt = 3 * [inputs.pop('''prompt''' )]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=True , return_tensors='''pt''' , )
            text_inputs = text_inputs['''input_ids'''].to(torch_device )
            text_embeds = audioldm_pipe.text_encoder(
                text_inputs , )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds , dim=-1 )
            embeds.append(text_embeds )
        inputs['''prompt_embeds'''] , inputs['''negative_prompt_embeds'''] = embeds
        # forward
        output = audioldm_pipe(**inputs )
        audio_b = output.audios[0]
        assert np.abs(audio_a - audio_b ).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032])
        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        prompt = "A hammer hitting a wooden surface"
        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios
        assert audios.shape == (1, 256)
        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
        assert audios.shape == (batch_size, 256)
        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)
        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate
        inputs = self.get_dummy_inputs(torch_device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016
        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        prompt = ["hey"]
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)
        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechTaHifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs
    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 81_920
        audio_slice = audio[77_230:77_240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2
    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 81_920
        audio_slice = audio[27_780:27_790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
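
# --- usage sketch (added for illustration; not part of the original tests) ---
# Runs the real checkpoint end to end. The argument values below are plausible
# defaults chosen for the sketch, not taken from this file; the download is
# large and a GPU is recommended.
if __name__ == "__main__":
    demo_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
    demo_audio = demo_pipe("A hammer hitting a wooden surface", num_inference_steps=10).audios[0]
    print(demo_audio.shape)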
| 60 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0,
                 depth_coefficient: float = 3.1, depth_divisor: int = 8,
                 kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
                 in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
                 out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
                 depthwise_padding: List[int] = [],
                 strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
                 num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
                 expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
                 squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2560,
                 pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001,
                 batch_norm_momentum: float = 0.99, dropout_rate: float = 0.5, drop_connect_rate: float = 0.2,
                 **kwargs, ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
    def atol_for_validation(self) -> float:
'''simple docstring'''
return 1e-5
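
# --- usage sketch (added for illustration, not part of the original file) ---
if __name__ == "__main__":
    # With the defaults above, `num_hidden_layers` is derived as
    # 4 * sum(num_block_repeats) = 4 * 16 = 64.
    demo_config = EfficientNetConfig()
    print(demo_config.model_type, demo_config.num_hidden_layers)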
| 60 | 1 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """
    Video classification pipeline: samples frames from a video with `decord`
    and classifies them with a video classification model.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos: Union[str, List[str]], **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames
        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)
        videoreader = VideoReader(video)
        videoreader.seek(0)
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)
        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)
        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
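
# --- usage sketch (added for illustration; needs `decord`, `torch`, and a model
# download; the checkpoint name and the local file path are assumptions, not
# taken from this file) ---
if __name__ == "__main__":
    from transformers import pipeline

    video_classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
    print(video_classifier("video.mp4", top_k=3))  # "video.mp4" is a hypothetical local file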
| 655 |
from ....utils import logging
logger = logging.get_logger(__name__)


class MMBTConfig:
    """
    Configuration wrapper for an MMBT-style multimodal model: copies the wrapped
    text config and records the modal (e.g. image) encoder hidden size.
    """

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 655 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""vinvino02/glpn-kitti""": """https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json""",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1],
                 hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2],
                 num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu",
                 hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
                 drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=64, max_depth=10,
                 head_in_index=-1, **kwargs, ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
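
# --- usage sketch (added for illustration, not part of the original file) ---
if __name__ == "__main__":
    demo_config = GLPNConfig()
    print(demo_config.model_type, demo_config.hidden_sizes)  # glpn [32, 64, 160, 256]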
| 150 |
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph
        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    def _normalize_graph(self, sources, sinks):
        if sources is int:
            sources = [sources]
        if sinks is int:
            sinks = [sinks]
        if len(sources) == 0 or len(sinks) == 0:
            return
        self.source_index = sources[0]
        self.sink_index = sinks[0]
        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])
            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0
            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0
        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth
        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]
        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1
        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)
            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index], self.graph[from_index][to_index] - self.preflow[from_index][to_index], )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]
        if min_height is not None:
            self.heights[vertex_index] = min_height + 1


if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}")
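    # Added note (illustration): the only augmenting path in the demo graph is
    # 0 -> 1 -> 2 -> 3 with capacities (7, 6, 8), so this should print
    # "maximum flow is 6"; the 9-capacity edge 3 -> 0 leaves the sink and
    # carries no flow.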
| 569 | 0 |
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class Test(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
| 216 |
'''simple docstring'''
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """
    Finds the Jaccard similarity between two sets or two lists/tuples:
    |A intersection B| / |A union B| (or / (|A| + |B|) with alternative_union).
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
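    # Added example (illustration): |A intersection B| = 3 and |A union B| = 8,
    # so the line above prints 0.375; with alternative_union=True the
    # denominator becomes |A| + |B| = 11 instead.
    print(jaccard_similarity(set_a, set_b, alternative_union=True))  # 3/11 ~ 0.273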
| 216 | 1 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 328 |
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(F'''{token} {vocab_tokens[token]}\n''')
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 328 | 1 |
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def test_send_file_running_as_expected(file, sock):
    """
    Verify that `send_file` drives the mocked socket and file in the expected order.
    """
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)
    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 705 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, '''src''', '''diffusers''')
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = find_backend(""" if not is_torch_available():""" )
self.assertEqual(a_, """torch""" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
_snake_case : Any = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" )
self.assertEqual(a_, """torch_and_transformers""" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
_snake_case : Union[str, Any] = find_backend(
""" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" )
self.assertEqual(a_, """torch_and_transformers_and_onnx""" )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Dict = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("""torch""", a_ )
self.assertIn("""torch_and_transformers""", a_ )
self.assertIn("""flax_and_transformers""", a_ )
self.assertIn("""torch_and_transformers_and_onnx""", a_ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""UNet2DModel""", objects["""torch"""] )
self.assertIn("""FlaxUNet2DConditionModel""", objects["""flax"""] )
self.assertIn("""StableDiffusionPipeline""", objects["""torch_and_transformers"""] )
self.assertIn("""FlaxStableDiffusionPipeline""", objects["""flax_and_transformers"""] )
self.assertIn("""LMSDiscreteScheduler""", objects["""torch_and_scipy"""] )
self.assertIn("""OnnxStableDiffusionPipeline""", objects["""torch_and_transformers_and_onnx"""] )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[Any] = create_dummy_object("""CONSTANT""", """'torch'""" )
self.assertEqual(a_, """\nCONSTANT = None\n""" )
_snake_case : Optional[int] = create_dummy_object("""function""", """'torch'""" )
self.assertEqual(
a_, """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
_snake_case : List[Any] = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, 'torch')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, 'torch')
"""
_snake_case : Union[str, Any] = create_dummy_object("""FakeClass""", """'torch'""" )
self.assertEqual(a_, a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
"""
_snake_case : List[Any] = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""], a_ )
| 28 | 0 |
"""simple docstring"""
import math
def solution(n: int = 100) -> int:
    """
    Returns the difference between the square of the sum and the sum of the
    squares of the first `n` natural numbers.
    """
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
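    # Added sanity check (illustration): for n = 10 the square of the sum is
    # 55**2 = 3025 and the sum of squares is 385, so the difference is 2640.
    assert solution(10) == 2640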
| 77 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    """
    Wraps an EnCodec feature extractor and a T5 tokenizer into a single processor.
    """

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None):
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape
        if padding_mask is None:
            return list(audio_values)
        padding_mask = to_numpy(padding_mask)
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)
        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)
        return audio_values
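
# --- usage sketch (added for illustration, not part of the original file) ---
# Shows how `_decode_audio` strips padded samples per batch item. The fake
# feature extractor and the `__new__` shortcut are assumptions made so the
# sketch runs without loading any pretrained components.
if __name__ == "__main__":
    class _FakeFeatureExtractor:
        padding_value = 0.0

    demo = MusicgenProcessor.__new__(MusicgenProcessor)  # skip __init__ for the sketch
    demo.feature_extractor = _FakeFeatureExtractor()
    demo_audio = np.ones((1, 1, 4), dtype=np.float32)
    demo_mask = np.array([[1, 1, 1, 0]])  # the last sample is padding
    print(demo._decode_audio(demo_audio, padding_mask=demo_mask)[0].shape)  # (1, 3)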
| 145 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
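# Added note (illustration): with the lazy module installed in `sys.modules`,
# `from transformers import TrOCRProcessor` only imports `processing_trocr` on
# first attribute access; the torch-backed classes listed above stay unimported
# until they are actually requested.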
| 708 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process, args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]), ) )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process, args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]), ) )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process, args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ), ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
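    # Added check (illustration): the parallel sort of the reversed 10-element
    # list should agree with Python's built-in sort.
    assert odd_even_transposition(list(range(10, 0, -1))) == sorted(range(1, 11))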
| 620 | 0 |
"""simple docstring"""
def harmonic_series(n_term: str) -> list:
    """
    Find the harmonic series 1 + 1/2 + 1/3 + ... up to the n-th term,
    returned as a list of strings.
    """
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
if __name__ == "__main__":
    nth_term = input('''Enter the last number (nth term) of the Harmonic Series''')
print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
print(harmonic_series(nth_term))
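    # Added example (illustration): the series is returned as strings, e.g.
    # harmonic_series("4") == ["1", "1/2", "1/3", "1/4"].
    assert harmonic_series("4") == ["1", "1/2", "1/3", "1/4"]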
| 118 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.txt''',
'''merges_file''': '''bpe.codes''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
},
'''merges_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''vinai/phobert-base''': 256,
'''vinai/phobert-large''': 256,
}
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
class PhobertTokenizer(PreTrainedTokenizer):
    """
    Construct a PhoBERT tokenizer, based on Byte-Pair Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, merges_file, bos_token="<s>", eos_token="</s>", sep_token="</s>",
                 cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs, ):
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs, )
        self.vocab_file = vocab_file
        self.merges_file = merges_file
        self.encoder = {}
        self.encoder[bos_token] = 0
        self.encoder[pad_token] = 1
        self.encoder[eos_token] = 2
        self.encoder[unk_token] = 3
        self.add_from_file(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
@property
    def vocab_size(self):
'''simple docstring'''
return len(self.encoder )
    def get_vocab(self):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(R"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)
        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        """
        Loads a pre-existing dictionary from a text file and adds its symbols
        to this instance.
        """
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''')
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
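
# --- usage sketch (added for illustration; downloads the vinai/phobert-base
# vocab and merges files) ---
if __name__ == "__main__":
    demo_tok = PhobertTokenizer.from_pretrained("vinai/phobert-base")
    print(demo_tok.tokenize("Tôi là sinh_viên"))  # sub-word pieces end in "@@"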
| 118 | 1 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
'''simple docstring'''
    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
    def __getitem__(self, index):
"""simple docstring"""
return (self.token_ids[index], self.lengths[index])
def __len__( self) -> Tuple:
"""simple docstring"""
return len(self.lengths)
    def check(self):
"""simple docstring"""
assert len(self.token_ids) == len(self.lengths)
assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
    def remove_long_sequences(self):
        """
        Sequences that are too long are split by chunks of max_model_input_size.
        """
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(F'''Splitting {sum(indices)} too long sequences.''')

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]
        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)
                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])
        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        """
        Too short sequences are simply removed.
        """
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''')
    def remove_unknown_sequences(self):
        """
        Remove sequences with a (too) high level of unknown tokens.
        """
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''')
    def print_statistics(self):
"""simple docstring"""
if not self.params.is_master:
return
logger.info(F'''{len(self)} sequences''')
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self, batch):
        """
        Do the padding and transform into torch.tensor.
        """
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)
        # Max for paddings
        max_seq_len_ = max(lengths)
        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)
        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
| 582 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
"""simple docstring"""
super().setUp()
UpperCamelCase__ : Any =[
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
UpperCamelCase__ : int =dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE))))
UpperCamelCase__ : Optional[int] =["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
UpperCamelCase__ : Optional[int] ={"unk_token": "<unk>"}
UpperCamelCase__ : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
UpperCamelCase__ : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE) + "\n")
with open(self.merges_file , "w" , encoding="utf-8") as fp:
fp.write("\n".join(__SCREAMING_SNAKE_CASE))
def UpperCAmelCase ( self , **__SCREAMING_SNAKE_CASE) -> Optional[int]:
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE)
def UpperCAmelCase ( self , **__SCREAMING_SNAKE_CASE) -> Optional[Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE)
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE) -> Any:
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def UpperCAmelCase ( self) -> Optional[Any]:
"""simple docstring"""
return LEDTokenizer.from_pretrained("allenai/led-base-16384")
@cached_property
def UpperCAmelCase ( self) -> Optional[Any]:
"""simple docstring"""
return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
@require_torch
def UpperCAmelCase ( self) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] =["A long paragraph for summarization.", "Another paragraph for summarization."]
        UpperCamelCase__ : Optional[int] =[0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ : Any =tokenizer(__SCREAMING_SNAKE_CASE , max_length=len(__SCREAMING_SNAKE_CASE) , padding=__SCREAMING_SNAKE_CASE , return_tensors="pt")
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
self.assertEqual((2, 9) , batch.input_ids.shape)
self.assertEqual((2, 9) , batch.attention_mask.shape)
UpperCamelCase__ : List[Any] =batch.input_ids.tolist()[0]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
@require_torch
def UpperCAmelCase ( self) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : List[Any] =["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ : int =tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors="pt")
self.assertIn("input_ids" , __SCREAMING_SNAKE_CASE)
self.assertIn("attention_mask" , __SCREAMING_SNAKE_CASE)
self.assertNotIn("labels" , __SCREAMING_SNAKE_CASE)
self.assertNotIn("decoder_attention_mask" , __SCREAMING_SNAKE_CASE)
@require_torch
def UpperCAmelCase ( self) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] =[
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ : Tuple =tokenizer(text_target=__SCREAMING_SNAKE_CASE , max_length=32 , padding="max_length" , return_tensors="pt")
self.assertEqual(32 , targets["input_ids"].shape[1])
@require_torch
def UpperCAmelCase ( self) -> int:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ : Optional[int] =tokenizer(
["I am a small frog" * 10_24, "I am a small frog"] , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , return_tensors="pt")
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
self.assertEqual(batch.input_ids.shape , (2, 51_22))
@require_torch
def UpperCAmelCase ( self) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] =["A long paragraph for summarization."]
UpperCamelCase__ : Any =[
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ : str =tokenizer(__SCREAMING_SNAKE_CASE , return_tensors="pt")
UpperCamelCase__ : str =tokenizer(text_target=__SCREAMING_SNAKE_CASE , return_tensors="pt")
UpperCamelCase__ : int =inputs["input_ids"]
UpperCamelCase__ : Tuple =targets["input_ids"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
@require_torch
def UpperCAmelCase ( self) -> List[str]:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ : Any =["Summary of the text.", "Another summary."]
UpperCamelCase__ : List[str] =[[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
UpperCamelCase__ : str =tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Optional[Any] =[[0] * len(__SCREAMING_SNAKE_CASE) for x in encoded_output["input_ids"]]
UpperCamelCase__ : Any =tokenizer.pad(__SCREAMING_SNAKE_CASE)
self.assertSequenceEqual(outputs["global_attention_mask"] , __SCREAMING_SNAKE_CASE)
def UpperCAmelCase ( self) -> Optional[int]:
"""simple docstring"""
pass
def UpperCAmelCase ( self) -> List[Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
UpperCamelCase__ : Dict =self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Dict =self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : List[str] ="A, <mask> AllenNLP sentence."
UpperCamelCase__ : List[Any] =tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Dict =tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE)
self.assertEqual(sum(tokens_r["token_type_ids"]) , sum(tokens_p["token_type_ids"]))
self.assertEqual(
sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]) , sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]) , )
UpperCamelCase__ : List[str] =tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
UpperCamelCase__ : str =tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2])
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2])
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
__UpperCAmelCase = logging.get_logger(__name__)
# General docstring
__UpperCAmelCase = "ResNetConfig"
# Base docstring
__UpperCAmelCase = "microsoft/resnet-50"
__UpperCAmelCase = [1, 2_048, 7, 7]
# Image classification docstring
__UpperCAmelCase = "microsoft/resnet-50"
__UpperCAmelCase = "tiger cat"
__UpperCAmelCase = [
"microsoft/resnet-50",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class lowercase_ ( nn.Module ):
def __init__( self : List[str] , _lowercase : str , _lowercase : int , _lowercase : Dict = 3 , _lowercase : List[Any] = 1 , _lowercase : str = "relu" ):
super().__init__()
lowerCAmelCase__ : Optional[int] = nn.Convad(
_lowercase , _lowercase , kernel_size=_lowercase , stride=_lowercase , padding=kernel_size // 2 , bias=_lowercase )
lowerCAmelCase__ : Any = nn.BatchNormad(_lowercase )
lowerCAmelCase__ : Union[str, Any] = ACTaFN[activation] if activation is not None else nn.Identity()
def _lowerCAmelCase ( self : Optional[Any] , _lowercase : Any ):
lowerCAmelCase__ : Optional[int] = self.convolution(_lowercase )
lowerCAmelCase__ : Dict = self.normalization(_lowercase )
lowerCAmelCase__ : int = self.activation(_lowercase )
return hidden_state
class lowercase_ ( nn.Module ):
def __init__( self : Dict , _lowercase : Optional[Any] ):
super().__init__()
lowerCAmelCase__ : Tuple = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
lowerCAmelCase__ : Optional[Any] = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
lowerCAmelCase__ : Union[str, Any] = config.num_channels
def _lowerCAmelCase ( self : Dict , _lowercase : Any ):
lowerCAmelCase__ : Optional[Any] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
lowerCAmelCase__ : Any = self.embedder(_lowercase )
lowerCAmelCase__ : List[Any] = self.pooler(_lowercase )
return embedding
class lowercase_ ( nn.Module ):
def __init__( self : Any , _lowercase : Optional[int] , _lowercase : int , _lowercase : List[str] = 2 ):
super().__init__()
lowerCAmelCase__ : Optional[int] = nn.Convad(_lowercase , _lowercase , kernel_size=1 , stride=_lowercase , bias=_lowercase )
lowerCAmelCase__ : Optional[Any] = nn.BatchNormad(_lowercase )
def _lowerCAmelCase ( self : Dict , _lowercase : Tuple ):
lowerCAmelCase__ : List[str] = self.convolution(_lowercase )
lowerCAmelCase__ : List[str] = self.normalization(_lowercase )
return hidden_state
class lowercase_ ( nn.Module ):
def __init__( self : Any , _lowercase : Union[str, Any] , _lowercase : str , _lowercase : Dict = 1 , _lowercase : int = "relu" ):
super().__init__()
lowerCAmelCase__ : Optional[Any] = in_channels != out_channels or stride != 1
lowerCAmelCase__ : Tuple = (
ResNetShortCut(_lowercase , _lowercase , stride=_lowercase ) if should_apply_shortcut else nn.Identity()
)
lowerCAmelCase__ : Tuple = nn.Sequential(
ResNetConvLayer(_lowercase , _lowercase , stride=_lowercase ) , ResNetConvLayer(_lowercase , _lowercase , activation=_lowercase ) , )
lowerCAmelCase__ : Optional[int] = ACTaFN[activation]
def _lowerCAmelCase ( self : Tuple , _lowercase : List[str] ):
lowerCAmelCase__ : int = hidden_state
lowerCAmelCase__ : Union[str, Any] = self.layer(_lowercase )
lowerCAmelCase__ : Optional[Any] = self.shortcut(_lowercase )
hidden_state += residual
lowerCAmelCase__ : Tuple = self.activation(_lowercase )
return hidden_state
class lowercase_ ( nn.Module ):
def __init__( self : List[str] , _lowercase : Optional[Any] , _lowercase : str , _lowercase : List[Any] = 1 , _lowercase : Any = "relu" , _lowercase : List[Any] = 4 ):
super().__init__()
lowerCAmelCase__ : int = in_channels != out_channels or stride != 1
lowerCAmelCase__ : List[str] = out_channels // reduction
lowerCAmelCase__ : Dict = (
ResNetShortCut(_lowercase , _lowercase , stride=_lowercase ) if should_apply_shortcut else nn.Identity()
)
lowerCAmelCase__ : List[Any] = nn.Sequential(
ResNetConvLayer(_lowercase , _lowercase , kernel_size=1 ) , ResNetConvLayer(_lowercase , _lowercase , stride=_lowercase ) , ResNetConvLayer(_lowercase , _lowercase , kernel_size=1 , activation=_lowercase ) , )
lowerCAmelCase__ : Any = ACTaFN[activation]
def _lowerCAmelCase ( self : int , _lowercase : Optional[int] ):
lowerCAmelCase__ : Dict = hidden_state
lowerCAmelCase__ : str = self.layer(_lowercase )
lowerCAmelCase__ : str = self.shortcut(_lowercase )
hidden_state += residual
lowerCAmelCase__ : Union[str, Any] = self.activation(_lowercase )
return hidden_state
class lowercase_ ( nn.Module ):
def __init__( self : str , _lowercase : str , _lowercase : int , _lowercase : int , _lowercase : List[str] = 2 , _lowercase : List[Any] = 2 , ):
super().__init__()
lowerCAmelCase__ : Tuple = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
lowerCAmelCase__ : List[Any] = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(_lowercase , _lowercase , stride=_lowercase , activation=config.hidden_act ) , *[layer(_lowercase , _lowercase , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def _lowerCAmelCase ( self : Union[str, Any] , _lowercase : Tuple ):
lowerCAmelCase__ : int = input
for layer in self.layers:
lowerCAmelCase__ : str = layer(_lowercase )
return hidden_state
class lowercase_ ( nn.Module ):
def __init__( self : str , _lowercase : Union[str, Any] ):
super().__init__()
lowerCAmelCase__ : Union[str, Any] = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
_lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
lowerCAmelCase__ : List[str] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(_lowercase , config.depths[1:] ):
self.stages.append(ResNetStage(_lowercase , _lowercase , _lowercase , depth=_lowercase ) )
def _lowerCAmelCase ( self : int , _lowercase : Tuple , _lowercase : Union[str, Any] = False , _lowercase : int = True ):
lowerCAmelCase__ : Tuple = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowerCAmelCase__ : List[Any] = hidden_states + (hidden_state,)
lowerCAmelCase__ : List[Any] = stage_module(_lowercase )
if output_hidden_states:
lowerCAmelCase__ : int = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=_lowercase , hidden_states=_lowercase , )
class lowercase_ ( a_ ):
__magic_name__ : Tuple = ResNetConfig
__magic_name__ : Tuple = 'resnet'
__magic_name__ : List[Any] = 'pixel_values'
__magic_name__ : Union[str, Any] = True
def _lowerCAmelCase ( self : Union[str, Any] , _lowercase : Optional[Any] ):
if isinstance(_lowercase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" )
elif isinstance(_lowercase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def _lowerCAmelCase ( self : Optional[Any] , _lowercase : Union[str, Any] , _lowercase : Tuple=False ):
if isinstance(_lowercase , _lowercase ):
lowerCAmelCase__ : Optional[Any] = value
__UpperCAmelCase = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__UpperCAmelCase = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"""The bare ResNet model outputting raw features without any specific head on top.""" , a_ , )
class lowercase_ ( a_ ):
def __init__( self : List[str] , _lowercase : List[str] ):
super().__init__(_lowercase )
lowerCAmelCase__ : Optional[int] = config
lowerCAmelCase__ : int = ResNetEmbeddings(_lowercase )
lowerCAmelCase__ : str = ResNetEncoder(_lowercase )
lowerCAmelCase__ : Dict = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_lowercase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _lowerCAmelCase ( self : Tuple , _lowercase : int , _lowercase : Optional[int] = None , _lowercase : Dict = None ):
lowerCAmelCase__ : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ : int = self.embedder(_lowercase )
lowerCAmelCase__ : Any = self.encoder(
_lowercase , output_hidden_states=_lowercase , return_dict=_lowercase )
lowerCAmelCase__ : Dict = encoder_outputs[0]
lowerCAmelCase__ : Optional[Any] = self.pooler(_lowercase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_lowercase , pooler_output=_lowercase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"""\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n """ , a_ , )
class lowercase_ ( a_ ):
def __init__( self : int , _lowercase : List[Any] ):
super().__init__(_lowercase )
lowerCAmelCase__ : List[str] = config.num_labels
lowerCAmelCase__ : Any = ResNetModel(_lowercase )
# classification head
lowerCAmelCase__ : List[str] = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _lowerCAmelCase ( self : str , _lowercase : Optional[Any] = None , _lowercase : Union[str, Any] = None , _lowercase : Dict = None , _lowercase : Optional[Any] = None , ):
lowerCAmelCase__ : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ : Union[str, Any] = self.resnet(_lowercase , output_hidden_states=_lowercase , return_dict=_lowercase )
lowerCAmelCase__ : Optional[int] = outputs.pooler_output if return_dict else outputs[1]
lowerCAmelCase__ : Tuple = self.classifier(_lowercase )
lowerCAmelCase__ : Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowerCAmelCase__ : Tuple = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowerCAmelCase__ : List[str] = "single_label_classification"
else:
lowerCAmelCase__ : List[Any] = "multi_label_classification"
if self.config.problem_type == "regression":
lowerCAmelCase__ : Union[str, Any] = MSELoss()
if self.num_labels == 1:
lowerCAmelCase__ : Tuple = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowerCAmelCase__ : str = loss_fct(_lowercase , _lowercase )
elif self.config.problem_type == "single_label_classification":
lowerCAmelCase__ : List[str] = CrossEntropyLoss()
lowerCAmelCase__ : int = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowerCAmelCase__ : Any = BCEWithLogitsLoss()
lowerCAmelCase__ : Dict = loss_fct(_lowercase , _lowercase )
if not return_dict:
lowerCAmelCase__ : Optional[Any] = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_lowercase , logits=_lowercase , hidden_states=outputs.hidden_states )
@add_start_docstrings(
"""\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n """ , a_ , )
class lowercase_ ( a_ , a_ ):
def __init__( self : str , _lowercase : int ):
super().__init__(_lowercase )
super()._init_backbone(_lowercase )
lowerCAmelCase__ : str = [config.embedding_size] + config.hidden_sizes
lowerCAmelCase__ : Any = ResNetEmbeddings(_lowercase )
lowerCAmelCase__ : Optional[Any] = ResNetEncoder(_lowercase )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_lowercase )
@replace_return_docstrings(output_type=_lowercase , config_class=_CONFIG_FOR_DOC )
def _lowerCAmelCase ( self : Union[str, Any] , _lowercase : Dict , _lowercase : Optional[int] = None , _lowercase : int = None ):
lowerCAmelCase__ : str = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ : Union[str, Any] = self.embedder(_lowercase )
lowerCAmelCase__ : List[Any] = self.encoder(_lowercase , output_hidden_states=_lowercase , return_dict=_lowercase )
lowerCAmelCase__ : List[Any] = outputs.hidden_states
lowerCAmelCase__ : Optional[int] = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
lowerCAmelCase__ : Optional[Any] = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=_lowercase , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=_lowercase , )
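
# --- Illustrative usage sketch (added; not part of the original file) ---
# A minimal end-to-end forward pass through a randomly initialized ResNet.
# Assumes the `transformers` package is installed; the tiny config values are
# invented here purely to keep the example fast.
if __name__ == "__main__":
    import torch
    from transformers import ResNetConfig, ResNetModel

    tiny_config = ResNetConfig(embedding_size=32, hidden_sizes=[64, 128], depths=[1, 1])
    tiny_model = ResNetModel(tiny_config).eval()
    dummy_pixels = torch.randn(1, 3, 64, 64)
    with torch.no_grad():
        out = tiny_model(dummy_pixels)
    # last_hidden_state has shape (batch, hidden_sizes[-1], reduced_height, reduced_width)
    print(out.last_hidden_state.shape)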
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = generate_pascal_triangle(lowerCAmelCase__ )
for row_idx in range(lowerCAmelCase__ ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=''' ''' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] ,end=''' ''' )
else:
print(triangle[row_idx][col_idx] ,end='''''' )
print()
def lowercase ( lowerCAmelCase__ ):
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
lowerCamelCase_ = []
for current_row_idx in range(lowerCAmelCase__ ):
lowerCamelCase_ = populate_current_row(lowerCAmelCase__ ,lowerCAmelCase__ )
triangle.append(lowerCAmelCase__ )
return triangle
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
lowerCamelCase_ , lowerCamelCase_ = 1, 1
for current_col_idx in range(1 ,lowerCAmelCase__ ):
calculate_current_element(
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
return current_row
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,):
lowerCamelCase_ = triangle[current_row_idx - 1][current_col_idx - 1]
lowerCamelCase_ = triangle[current_row_idx - 1][current_col_idx]
lowerCamelCase_ = above_to_left_elt + above_to_right_elt
def lowercase ( lowerCAmelCase__ ):
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
lowerCamelCase_ = [[1]]
for row_index in range(1 ,lowerCAmelCase__ ):
lowerCamelCase_ = [0] + result[-1] + [0]
lowerCamelCase_ = row_index + 1
# Calculate the number of distinct elements in a row
lowerCamelCase_ = sum(divmod(lowerCAmelCase__ ,2 ) )
lowerCamelCase_ = [
temp_row[i - 1] + temp_row[i] for i in range(1 ,distinct_elements + 1 )
]
lowerCamelCase_ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
lowerCamelCase_ = row_first_half + row_second_half
result.append(lowerCAmelCase__ )
return result
def lowercase ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(lowerCAmelCase__ ,lowerCAmelCase__ ) -> None:
lowerCamelCase_ = f"{func.__name__}({value})"
lowerCamelCase_ = timeit(f"__main__.{call}" ,setup='''import __main__''' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f"{call:38} -- {timing:.4f} seconds" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(lowerCAmelCase__ ,lowerCAmelCase__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
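
# Added illustration: both generators produce the same rows, e.g. for four rows:
# >>> generate_pascal_triangle(4)
# [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]
# >>> generate_pascal_triangle_optimized(4)
# [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]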
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
    # See all BART models at https://huggingface.co/models?filter=bart
}


class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )


class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
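
# --- Illustrative usage sketch (added; not part of the original file) ---
# Generating the dummy inputs used for ONNX export of the default seq2seq task.
# Assumes `transformers` is installed and the `facebook/bart-base` tokenizer can
# be downloaded; the printed key set is exactly what `inputs` above declares.
if __name__ == "__main__":
    from transformers import AutoTokenizer, BartConfig
    from transformers.models.bart.configuration_bart import BartOnnxConfig

    tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
    onnx_config = BartOnnxConfig(BartConfig(), task="default")
    dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer)
    print(sorted(dummy_inputs.keys()))
    # ['attention_mask', 'decoder_attention_mask', 'decoder_input_ids', 'input_ids']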
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
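
# --- Illustrative note (added; not part of the original file) ---
# Inside the library, the two classes above are used roughly as follows. Shown
# as a commented sketch only, since this module relies on relative imports and
# cannot be run as a standalone script:
#
#     onnx_config = YolosOnnxConfig(YolosConfig())
#     dict(onnx_config.inputs)
#     # {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}
#     onnx_config.default_onnx_opset   # 12
#     onnx_config.atol_for_validation  # 1e-4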
from typing import Dict, List, Optional, Union

import numpy as np

from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy


logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch

        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]

        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
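
# --- Illustrative usage sketch (added; not part of the original file) ---
# A toy subclass showing how `pad` right-pads ragged inputs and builds the
# attention mask. Assumes `transformers` is installed; all values are invented.
if __name__ == "__main__":
    from transformers import SequenceFeatureExtractor

    class ToyFeatureExtractor(SequenceFeatureExtractor):
        model_input_names = ["input_values"]

    extractor = ToyFeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
    batch = extractor.pad({"input_values": [[0.1, 0.2, 0.3], [0.4]]}, padding=True, return_tensors="np")
    print(batch["input_values"].shape)       # (2, 3); the second row is right-padded with 0.0
    print(batch["attention_mask"].tolist())  # [[1, 1, 1], [1, 0, 0]]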
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """
    Implements the Adler-32 checksum: iterates over the input characters,
    updating the two running sums `a` and `b` modulo `MOD_ADLER`.
    """
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
import os
import unittest
from tempfile import TemporaryDirectory

import torch
import torch.nn as nn

from accelerate.utils import (
    OffloadedWeightsLoader,
    extract_submodules_state_dict,
    load_offloaded_weight,
    offload_state_dict,
    offload_weight,
)


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
                # TODO: add tests on the fact weights are properly loaded

    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})

                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))

    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})

        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
snake_case_ : Optional[Any] = logging.getLogger(__name__)
class __snake_case ( a ):
UpperCAmelCase__ : Union[str, Any] = '''summarization'''
UpperCAmelCase__ : str = ['''loss''']
UpperCAmelCase__ : Tuple = ROUGE_KEYS
UpperCAmelCase__ : List[str] = '''rouge2'''
def __init__( self : int , _snake_case : List[Any] , **_snake_case : Any):
"""simple docstring"""
if hparams.sortish_sampler and hparams.gpus > 1:
UpperCAmelCase_ = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''')
if hparams.sortish_sampler:
raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''')
super().__init__(_snake_case , num_labels=_snake_case , mode=self.mode , **_snake_case)
use_task_specific_params(self.model , '''summarization''')
save_git_info(self.hparams.output_dir)
UpperCAmelCase_ = Path(self.output_dir) / '''metrics.json'''
UpperCAmelCase_ = Path(self.output_dir) / '''hparams.pkl'''
pickle_save(self.hparams , self.hparams_save_path)
UpperCAmelCase_ = 0
UpperCAmelCase_ = defaultdict(_snake_case)
UpperCAmelCase_ = self.config.model_type
UpperCAmelCase_ = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
UpperCAmelCase_ = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
UpperCAmelCase_ = {
'''train''': self.hparams.n_train,
'''val''': self.hparams.n_val,
'''test''': self.hparams.n_test,
}
UpperCAmelCase_ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
UpperCAmelCase_ = {
'''train''': self.hparams.max_target_length,
'''val''': self.hparams.val_max_target_length,
'''test''': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F"""target_lens: {self.target_lens}"""
assert self.target_lens["train"] <= self.target_lens["test"], F"""target_lens: {self.target_lens}"""
if self.hparams.freeze_embeds:
freeze_embeds(self.model)
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder())
assert_all_frozen(self.model.get_encoder())
UpperCAmelCase_ = get_git_info()['''repo_sha''']
UpperCAmelCase_ = hparams.num_workers
UpperCAmelCase_ = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , _snake_case):
UpperCAmelCase_ = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
UpperCAmelCase_ = self.decoder_start_token_id
UpperCAmelCase_ = (
SeqaSeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''') else LegacySeqaSeqDataset
)
UpperCAmelCase_ = False
UpperCAmelCase_ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
UpperCAmelCase_ = self.hparams.eval_max_gen_length
else:
UpperCAmelCase_ = self.model.config.max_length
UpperCAmelCase_ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def lowerCamelCase ( self : List[str] , _snake_case : Dict[str, torch.Tensor]):
"""simple docstring"""
UpperCAmelCase_ = {
k: self.tokenizer.batch_decode(v.tolist()) if '''mask''' not in k else v.shape for k, v in batch.items()
}
save_json(_snake_case , Path(self.output_dir) / '''text_batch.json''')
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir) / '''tok_batch.json''')
UpperCAmelCase_ = True
return readable_batch
def lowerCamelCase ( self : List[Any] , _snake_case : Any , **_snake_case : List[str]):
"""simple docstring"""
return self.model(_snake_case , **_snake_case)
def lowerCamelCase ( self : Optional[int] , _snake_case : List[int]):
"""simple docstring"""
UpperCAmelCase_ = self.tokenizer.batch_decode(
_snake_case , skip_special_tokens=_snake_case , clean_up_tokenization_spaces=_snake_case)
return lmap(str.strip , _snake_case)
def lowerCamelCase ( self : int , _snake_case : dict):
"""simple docstring"""
UpperCAmelCase_ = self.tokenizer.pad_token_id
UpperCAmelCase_ , UpperCAmelCase_ = batch['''input_ids'''], batch['''attention_mask''']
UpperCAmelCase_ = batch['''labels''']
if isinstance(self.model , _snake_case):
UpperCAmelCase_ = self.model._shift_right(_snake_case)
else:
UpperCAmelCase_ = shift_tokens_right(_snake_case , _snake_case)
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
UpperCAmelCase_ = decoder_input_ids
self.save_readable_batch(_snake_case)
UpperCAmelCase_ = self(_snake_case , attention_mask=_snake_case , decoder_input_ids=_snake_case , use_cache=_snake_case)
UpperCAmelCase_ = outputs['''logits''']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
UpperCAmelCase_ = nn.CrossEntropyLoss(ignore_index=_snake_case)
assert lm_logits.shape[-1] == self.vocab_size
UpperCAmelCase_ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1]) , tgt_ids.view(-1))
else:
UpperCAmelCase_ = nn.functional.log_softmax(_snake_case , dim=-1)
UpperCAmelCase_ , UpperCAmelCase_ = label_smoothed_nll_loss(
_snake_case , _snake_case , self.hparams.label_smoothing , ignore_index=_snake_case)
return (loss,)
@property
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
return self.tokenizer.pad_token_id
def lowerCamelCase ( self : Optional[Any] , _snake_case : Any , _snake_case : Any):
"""simple docstring"""
UpperCAmelCase_ = self._step(_snake_case)
UpperCAmelCase_ = dict(zip(self.loss_names , _snake_case))
# tokens per batch
UpperCAmelCase_ = batch['''input_ids'''].ne(self.pad).sum() + batch['''labels'''].ne(self.pad).sum()
UpperCAmelCase_ = batch['''input_ids'''].shape[0]
UpperCAmelCase_ = batch['''input_ids'''].eq(self.pad).sum()
UpperCAmelCase_ = batch['''input_ids'''].eq(self.pad).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def lowerCamelCase ( self : Dict , _snake_case : int , _snake_case : Optional[Any]):
"""simple docstring"""
return self._generative_step(_snake_case)
def lowerCamelCase ( self : List[Any] , _snake_case : Any , _snake_case : Dict="val"):
"""simple docstring"""
self.step_count += 1
UpperCAmelCase_ = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
UpperCAmelCase_ = losses['''loss''']
UpperCAmelCase_ = {
k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
}
UpperCAmelCase_ = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
UpperCAmelCase_ = torch.tensor(_snake_case).type_as(_snake_case)
generative_metrics.update({k: v.item() for k, v in losses.items()})
losses.update(_snake_case)
UpperCAmelCase_ = {F"""{prefix}_avg_{k}""": x for k, x in losses.items()}
UpperCAmelCase_ = self.step_count
self.metrics[prefix].append(_snake_case) # callback writes this to self.metrics_save_path
UpperCAmelCase_ = flatten_list([x['''preds'''] for x in outputs])
return {
"log": all_metrics,
"preds": preds,
F"""{prefix}_loss""": loss,
F"""{prefix}_{self.val_metric}""": metric_tensor,
}
def lowerCamelCase ( self : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : List[Any]):
"""simple docstring"""
return calculate_rouge(_snake_case , _snake_case)
def lowerCamelCase ( self : int , _snake_case : dict):
"""simple docstring"""
UpperCAmelCase_ = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
UpperCAmelCase_ = self.model.generate(
batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=_snake_case , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
UpperCAmelCase_ = (time.time() - ta) / batch['''input_ids'''].shape[0]
UpperCAmelCase_ = self.ids_to_clean_text(_snake_case)
UpperCAmelCase_ = self.ids_to_clean_text(batch['''labels'''])
UpperCAmelCase_ = self._step(_snake_case)
UpperCAmelCase_ = dict(zip(self.loss_names , _snake_case))
UpperCAmelCase_ = self.calc_generative_metrics(_snake_case , _snake_case)
UpperCAmelCase_ = np.mean(lmap(_snake_case , _snake_case))
base_metrics.update(gen_time=_snake_case , gen_len=_snake_case , preds=_snake_case , target=_snake_case , **_snake_case)
return base_metrics
def lowerCamelCase ( self : Optional[int] , _snake_case : Dict , _snake_case : List[Any]):
"""simple docstring"""
return self._generative_step(_snake_case)
def lowerCamelCase ( self : List[str] , _snake_case : Optional[Any]):
"""simple docstring"""
return self.validation_epoch_end(_snake_case , prefix='''test''')
    def get_dataset(self, type_path):
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="Task name: summarization or translation."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
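

# A minimal sketch of how a new task can be layered on top of SummarizationModule,
# mirroring TranslationModule above. The class and metric choices here are purely
# illustrative assumptions, not part of the original script; the only required glue
# is the `loss_names`/`metric_names`/`default_val_metric` wiring plus a metric fn.
#
#   class HeadlineModule(SummarizationModule):
#       mode = "headline-generation"
#       loss_names = ["loss"]
#       metric_names = ["rouge1"]
#       default_val_metric = "rouge1"
#
#       def calc_generative_metrics(self, preds, target) -> dict:
#           return calculate_rouge(preds, target)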
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
snake_case_ : Dict = argparse.ArgumentParser()
snake_case_ : Optional[Any] = pl.Trainer.add_argparse_args(parser)
snake_case_ : str = SummarizationModule.add_model_specific_args(parser, os.getcwd())
snake_case_ : Dict = parser.parse_args()
main(args)
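

# Example invocation (hypothetical paths and model; a sketch of how the argparse
# plumbing above is typically driven from a shell):
#
#   python finetune.py \
#       --model_name_or_path t5-small \
#       --data_dir ./cnn_dm \
#       --output_dir ./out \
#       --task summarization \
#       --do_predict
#
# `main(args)` returns the trained SummarizationModule, so the same flow can also be
# reused programmatically by building `args` with `parser.parse_args([...])`.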
| 169 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it to a dict of lists
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)
    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features
    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
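

# A minimal sketch of how the padding path above is exercised through a concrete
# subclass; `Wav2Vec2FeatureExtractor` is used purely as an illustration, and the
# lengths/values below are arbitrary.
if __name__ == "__main__":
    from transformers import Wav2Vec2FeatureExtractor

    extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, return_attention_mask=True
    )
    ragged = [np.zeros(400, dtype=np.float32), np.zeros(320, dtype=np.float32)]  # ragged batch
    batch = extractor.pad({"input_values": ragged}, padding=True, return_tensors="np")
    # Both rows are padded to length 400; attention_mask is 1 on real frames, 0 on padding.
    print(batch["input_values"].shape, batch["attention_mask"].sum(axis=-1))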
| 470 |
"""simple docstring"""
def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Invert the bit at `position` of `number`."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is 1."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at `position` of `number`."""
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
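
    # A quick worked example of the helpers above (starting value: 0b1001 == 9).
    assert set_bit(0b1001, 1) == 0b1011  # 9  -> 11
    assert clear_bit(0b1011, 1) == 0b1001  # 11 -> 9
    assert flip_bit(0b1001, 3) == 0b0001  # 9  -> 1
    assert is_bit_set(0b1001, 3) is True
    assert get_bit(0b1001, 1) == 0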
| 470 | 1 |
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in blocks of this size for training. "
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )

            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it, and load it from here, using --tokenizer_name"
        )
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            " --mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
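

# Example invocation (hypothetical paths; a sketch only):
#
#   python run_language_modeling.py \
#       --model_name_or_path gpt2 \
#       --train_data_file ./train.txt \
#       --do_train \
#       --output_dir ./lm-out
#
# For masked-LM models (BERT/RoBERTa-style), add `--mlm` and optionally
# `--whole_word_mask`, which selects DataCollatorForWholeWordMask above.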
| 707 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # For testing: an iterable dataset that stops at a random point.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])

        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batches.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batches.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
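

# A minimal sketch (outside the test class) of the sharding behavior exercised above;
# run this file directly to see how two processes split one batch sampler.
if __name__ == "__main__":
    demo_sampler = BatchSampler(range(10), batch_size=2, drop_last=False)
    shards = [BatchSamplerShard(demo_sampler, 2, i) for i in range(2)]
    # Process 0 sees [[0, 1], [4, 5], [8, 9]]; process 1 sees [[2, 3], [6, 7], [0, 1]]
    # (the odd batch count wraps around so both shards stay the same length).
    print([list(shard) for shard in shards])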
| 509 | 0 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255

    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
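

# A minimal sketch of the usage pattern these tests protect (the path is a placeholder):
def demo_exclusive_section(lock_path="/tmp/demo.lock"):
    lock = FileLock(lock_path)
    # Only one process at a time gets past this line; a second caller blocks for up
    # to 5 seconds and then raises Timeout, exactly as asserted in test_filelock above.
    with lock.acquire(timeout=5):
        return "lock held"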
| 12 |
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """
    Set a (possibly quantized) tensor of `module` named `tensor_name` on `device`,
    quantizing `value` on the fly when the target parameter is a bitsandbytes one.
    """
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
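

# A minimal sketch of what the function above is used for (the toy module and value
# below are placeholders): moving a parameter onto a target device in one call.
#
#   import torch.nn as nn
#   layer = nn.Linear(4, 4)
#   set_module_quantized_tensor_to_device(layer, "weight", "cpu", value=torch.ones(4, 4))
#
# With a bitsandbytes `Int8Params`/`Params4bit` parameter, the same call quantizes
# `value` while moving it onto the target CUDA device.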
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    """
    Private recursion helper: returns the converted model and a boolean that indicates
    whether at least one linear layer has been replaced.
    """
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """
    Replace all `nn.Linear` (and gpt2-style `Conv1D`) modules of `model` with bitsandbytes
    quantized layers, keeping `modules_to_not_convert` (by default the `lm_head`) in full precision.
    """
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    """
    Utility returning the keys of the modules to keep in full precision (tied weights and
    the output head), if any.
    """
    # Create a copy of the model and tie the weights, then check if it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" and ".bias" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
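

# A minimal sketch of how these helpers fit together when quantizing a model
# (`BitsAndBytesConfig` is the public entry point; the model choice is arbitrary,
# and the exact internal call path may vary between transformers versions):
#
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#   config = BitsAndBytesConfig(load_in_8bit=True)
#   model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=config)
#
# Internally, loading computes get_keys_to_not_convert(model) and then calls
# replace_with_bnb_linear(model, modules_to_not_convert=..., quantization_config=config).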
| 34 | 0 |
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """
    Implementation of pigeonhole sort.

    >>> pigeon_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> pigeon_sort([])
    []
    >>> pigeon_sort([-2, -5, -45])
    [-45, -5, -2]
    """
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted))
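
    # Worked example: pigeon_sort([8, 3, 2, 7, 4]) builds holes over the range 2..8
    # (holes_range = 7), records each value at index value - 2, then replays the holes
    # in order -> [2, 3, 4, 7, 8]. Runtime is O(n + range), so the algorithm only pays
    # off when the spread of keys is comparable to the number of elements.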
| 705 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class a_ ( _snake_case ):
UpperCamelCase__ : str ="instructblip_qformer"
def __init__( self :List[Any] , _lowercase :List[Any]=30522 , _lowercase :str=768 , _lowercase :str=12 , _lowercase :int=12 , _lowercase :str=3072 , _lowercase :Optional[Any]="gelu" , _lowercase :Optional[int]=0.1 , _lowercase :int=0.1 , _lowercase :List[Any]=512 , _lowercase :Any=0.02 , _lowercase :Dict=1E-1_2 , _lowercase :int=0 , _lowercase :Any="absolute" , _lowercase :Optional[int]=2 , _lowercase :Optional[int]=1408 , **_lowercase :List[Any] , ) -> str:
super().__init__(pad_token_id=_lowercase , **_lowercase)
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = cross_attention_frequency
UpperCAmelCase_ = encoder_hidden_size
@classmethod
def __a ( cls :Any , _lowercase :Union[str, os.PathLike] , **_lowercase :int) -> "PretrainedConfig":
cls._set_token_in_kwargs(_lowercase)
UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(_lowercase , **_lowercase)
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''') == "instructblip":
UpperCAmelCase_ = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
return cls.from_dict(_lowercase , **_lowercase)
class a_ ( _snake_case ):
UpperCamelCase__ : Optional[Any] ="instructblip"
UpperCamelCase__ : int =True
def __init__( self :List[Any] , _lowercase :Tuple=None , _lowercase :Tuple=None , _lowercase :Any=None , _lowercase :Any=32 , **_lowercase :Union[str, Any]) -> List[str]:
super().__init__(**_lowercase)
if vision_config is None:
UpperCAmelCase_ = {}
logger.info('''vision_config is None. initializing the InstructBlipVisionConfig with default values.''')
if qformer_config is None:
UpperCAmelCase_ = {}
logger.info('''qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.''')
if text_config is None:
UpperCAmelCase_ = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''')
UpperCAmelCase_ = InstructBlipVisionConfig(**_lowercase)
UpperCAmelCase_ = InstructBlipQFormerConfig(**_lowercase)
UpperCAmelCase_ = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
UpperCAmelCase_ = CONFIG_MAPPING[text_model_type](**_lowercase)
UpperCAmelCase_ = self.text_config.tie_word_embeddings
UpperCAmelCase_ = self.text_config.is_encoder_decoder
UpperCAmelCase_ = num_query_tokens
UpperCAmelCase_ = self.vision_config.hidden_size
UpperCAmelCase_ = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
UpperCAmelCase_ = 1.0
UpperCAmelCase_ = 0.02
@classmethod
def __a ( cls :Tuple , _lowercase :InstructBlipVisionConfig , _lowercase :InstructBlipQFormerConfig , _lowercase :PretrainedConfig , **_lowercase :str , ) -> Union[str, Any]:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_lowercase , )
def __a ( self :Any) -> Tuple:
UpperCAmelCase_ = copy.deepcopy(self.__dict__)
UpperCAmelCase_ = self.vision_config.to_dict()
UpperCAmelCase_ = self.qformer_config.to_dict()
UpperCAmelCase_ = self.text_config.to_dict()
UpperCAmelCase_ = self.__class__.model_type
return output
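
# Usage sketch (assumes the public transformers API; illustrative only):
#
#   from transformers import InstructBlipConfig, InstructBlipVisionConfig, InstructBlipQFormerConfig, OPTConfig
#   config = InstructBlipConfig.from_vision_qformer_text_configs(
#       InstructBlipVisionConfig(), InstructBlipQFormerConfig(), OPTConfig()
#   )
#   config.to_dict()  # round-trips the three nested sub-configs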
| 561 | 0 |
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
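
# Usage sketch (illustrative only): the defaults reproduce the base NEZHA
# architecture, and individual fields can be overridden as keyword arguments.
#
#   config = NezhaConfig(num_hidden_layers=6)
#   assert config.max_relative_position == 64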
| 397 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
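
# Usage sketch (illustrative only): attribute_map lets the GPT-2 style field
# names be read through the standard config properties.
#
#   config = DecisionTransformerConfig()
#   assert config.num_hidden_layers == config.n_layer == 3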
| 36 | 0 |
def combination_util(arr, n, r, index, data, i):
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
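
# Cross-check with the standard library (illustration only, not in the original
# script): itertools.combinations enumerates the same r-subsets, so it is the
# idiomatic choice outside of teaching code.
from itertools import combinations


def print_combination_itertools(arr, r):
    for combo in combinations(arr, r):
        print(*combo)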
| 369 |
from __future__ import annotations

from math import gcd


def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
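
# Quick demonstration (illustration only, not in the original script): factor
# the classic textbook semiprime 8051 = 83 * 97 without going through the CLI.
def _pollard_rho_demo() -> None:
    factor = pollard_rho(8051)
    # Any nontrivial factor returned must divide the input evenly.
    assert factor is None or 8051 % factor == 0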
| 369 | 1 |
import argparse
from pathlib import Path

import torch
from packaging import version
from torch.onnx import export

from diffusers import AutoencoderKL


is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
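
# Example invocation (hypothetical script name and paths, shown for illustration):
#
#   python convert_vae_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 \
#       --output_path ./vae_onnx --opset 14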
 | 81 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock

import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset

from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType


class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))

    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)

    return dl


def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)

    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes


def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )


def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)


def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches


def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)


def main():
    accelerator = create_accelerator()

    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()

    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()

    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()

    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()

    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state


if __name__ == "__main__":
    main()
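
# The asserts above only hold under a two-process launch; an illustrative
# command (script name assumed) would be:
#
#   accelerate launch --num_processes 2 test_distributed_data_loop.py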
| 680 | 0 |
import shutil
import tempfile
import unittest
from unittest.mock import patch

from transformers import (
    DefaultFlowCallback,
    IntervalStrategy,
    PrinterCallback,
    ProgressCallback,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    is_torch_available,
)
from transformers.testing_utils import require_torch


if is_torch_available():
    from transformers.trainer import DEFAULT_CALLBACKS

    from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel


class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that go through."

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")


@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
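
# Usage sketch (illustrative only): the same event-recording callback can be
# attached to any Trainer to trace the callback order of a short run.
#
#   trainer = Trainer(model, args, callbacks=[MyTestTrainerCallback()])
#   trainer.train()
#   print(trainer.callback_handler.callbacks[-1].events)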
| 41 |
"""
Project Euler Problem 42: https://projecteuler.net/problem=42

Count how many words in words.txt are triangular words, i.e. words whose
alphabetical value is a triangular number t(n) = n * (n + 1) / 2.
"""
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    """
    Finds the amount of triangular words in the words file.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
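
# Worked example (illustration only): "SKY" -> 19 + 11 + 25 = 55, which is the
# 10th triangular number, so "SKY" is a triangular word.
def _is_triangular_word(word: str) -> bool:
    return sum(ord(ch) - 64 for ch in word) in TRIANGULAR_NUMBERS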
| 41 | 1 |
import argparse

from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
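
# Example invocation (hypothetical paths, shown for illustration):
#
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bigbird_tf/model.ckpt \
#       --big_bird_config_file ./bigbird_tf/config.json \
#       --pytorch_dump_path ./bigbird_pt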
| 144 |
def move_tower(height, from_pole, to_pole, with_pole):
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    print("moving disk from", fp, "to", tp)


def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
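
# Solving n disks takes 2**n - 1 moves (illustration only, not in the original
# script): each recursion level doubles the work of the level below and adds
# one move for the largest disk.
def _count_moves(height: int) -> int:
    return 2**height - 1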
 | 144 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
    "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
    "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
    "funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
    "funnel-transformer/intermediate": (
        "https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
    ),
    "funnel-transformer/intermediate-base": (
        "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
    ),
    "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
    "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
    "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
    "funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}


class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
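
# Usage sketch (illustrative only): num_hidden_layers is derived from the block
# layout rather than set directly.
#
#   config = FunnelConfig(block_sizes=[2, 2, 2])
#   assert config.num_hidden_layers == 6 and config.num_blocks == 3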
 | 524 |
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    sample: torch.FloatTensor


class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(
            in_channels,
            block_out_channels[0],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)

        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)

            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels,
            block_out_channels[-1],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class VectorQuantizer(nn.Module):
    """
    Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly avoids costly matrix
    multiplications and allows for post-hoc remapping of indices.
    """

    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q


class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
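
# Usage sketch (illustrative shapes): an encoder emitting 2*C channels of
# moments can be wrapped in DiagonalGaussianDistribution to draw a
# reparameterized latent sample.
#
#   moments = torch.randn(1, 8, 32, 32)   # mean and logvar stacked on dim 1
#   posterior = DiagonalGaussianDistribution(moments)
#   latent = posterior.sample()           # shape (1, 4, 32, 32)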
| 524 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__a : List[Any] = get_tests_dir("fixtures")
__a : List[Any] = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
__a : Dict = get_tests_dir("fixtures/dummy-config.json")
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, WavaVecaFeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTOR_CONFIG_DIR)
        self.assertIsInstance(config, WavaVecaFeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = WavaVecaConfig()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTOR_CONFIG_DIR).to_dict()
            config_dict.pop("feature_extractor_type")
            config = WavaVecaFeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, WavaVecaFeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, WavaVecaFeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            feature_extractor = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            feature_extractor = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            feature_extractor = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")
    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(WavaVecaConfig, WavaVecaFeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTOR_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(WavaVecaFeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 637 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __lowercase ( TestCase ):
    '''simple docstring'''

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"], add_special_tokens=False, return_token_type_ids=False, return_attention_mask=False
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )

    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)

    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
| 637 | 1 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase ( TokenizerTesterMixin , unittest.TestCase ):

    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
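# A quick walk-through of the toy merges above: "react" starts as the character
# sequence r e a c t</w>; the only applicable merge is "r e" -> "re", so it is
# rendered with continuation markers as re@@ a@@ c@@ t, while "adapt" merges all
# the way ("a p", "ap t</w>", "a d", "ad apt</w>") into the single token "adapt"
# -- exactly the expectation checked in test_full_tokenizer.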
| 720 |
import os
_UpperCAmelCase : int = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
def parse_roman_numerals( numerals ) -> int:
    '''simple docstring'''
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
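# Example: parse_roman_numerals("MCMXC") == 1990
# M(+1000), C(-100 since C < M), M(+1000), X(-10 since X < C), C(+100).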
def generate_roman_numerals( num ) -> str:
    '''simple docstring'''
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals
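# Example: generate_roman_numerals(1990) == "MCMXC"
# (one M for the thousands, 9 hundreds -> "CM", 9 tens -> "XC", 0 ones).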
def solution( roman_numerals_filename = "/p089_roman.txt" ) -> int:
    '''simple docstring'''
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        value = parse_roman_numerals(original)
        shortened = generate_roman_numerals(value)
        savings += len(original) - len(shortened)

    return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
| 3 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def A_ ( resistance : float , reactance : float , impedance : float ):
    """simple docstring"""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
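# Worked example (impedance triangle Z^2 = R^2 + X^2): with resistance=3 and
# reactance=4 the call returns {"impedance": 5.0}; conversely impedance=5 and
# reactance=4 recover {"resistance": 3.0}.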
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44 |
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset( IterableDataset ):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences
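        # input_characters estimates how many raw characters must be buffered to
        # fill num_of_sequences sequences of seq_length tokens, assuming roughly
        # 3.6 characters per token on average.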
    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def create_dataloader(args):
    """simple docstring"""
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    """simple docstring"""
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 44 | 1 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader( AbstractDatasetReader ):
    '''simple docstring'''

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode, file_format=self._file_format
        )
        return self.builder.as_dataset(split=self.split)
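# Minimal usage sketch (hypothetical: assumes an active SparkSession bound to
# `spark`; the single-column DataFrame below is illustrative only):
#
#   df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#   ds = SparkDatasetReader(df, streaming=False).read()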
| 177 |
def factorial( num ) -> int:
    """simple docstring"""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add( number ) -> int:
    """simple docstring"""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution( num = 100 ) -> int:
    """simple docstring"""
    nums = factorial(num)
    result = split_and_add(nums)
    return result
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 177 | 1 |
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def get_rotation( img , pt1 , pt2 , rows , cols ):
    """simple docstring"""
    matrix = cva.getAffineTransform(pt1, pt2)
    return cva.warpAffine(img, matrix, (rows, cols))
if __name__ == "__main__":
# read original image
    # read original image
    image = cva.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 605 |
def SCREAMING_SNAKE_CASE ( principal , rate_per_annum , years_to_repay ):
    """simple docstring"""
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
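# Worked example: principal=25000, rate_per_annum=0.12 (12% yearly) and
# years_to_repay=3 give rate_per_month=0.01 and 36 payments, for a monthly
# installment of roughly 830.36.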
if __name__ == "__main__":
import doctest
doctest.testmod()
| 605 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
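# Each optional-dependency block below only extends _import_structure when its
# backend (sentencepiece / tokenizers / torch / tf / flax) is importable, so the
# heavy frameworks are never imported eagerly.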
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
'''ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AlbertForMaskedLM''',
'''AlbertForMultipleChoice''',
'''AlbertForPreTraining''',
'''AlbertForQuestionAnswering''',
'''AlbertForSequenceClassification''',
'''AlbertForTokenClassification''',
'''AlbertModel''',
'''AlbertPreTrainedModel''',
'''load_tf_weights_in_albert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
'''TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAlbertForMaskedLM''',
'''TFAlbertForMultipleChoice''',
'''TFAlbertForPreTraining''',
'''TFAlbertForQuestionAnswering''',
'''TFAlbertForSequenceClassification''',
'''TFAlbertForTokenClassification''',
'''TFAlbertMainLayer''',
'''TFAlbertModel''',
'''TFAlbertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
'''FlaxAlbertForMaskedLM''',
'''FlaxAlbertForMultipleChoice''',
'''FlaxAlbertForPreTraining''',
'''FlaxAlbertForQuestionAnswering''',
'''FlaxAlbertForSequenceClassification''',
'''FlaxAlbertForTokenClassification''',
'''FlaxAlbertModel''',
'''FlaxAlbertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 716 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowercase_ = False
class A_ ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 230 | 0 |
'''simple docstring'''
import random
def partition( a , left_index , right_index ):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1


def quick_sort_random( a , left , right ):
    if left < right:
        pivot = random.randint(left, right - 1)
        a[left], a[pivot] = (
            a[pivot],
            a[left],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point
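# Picking the pivot uniformly at random keeps the *expected* running time at
# O(n log n) even on adversarial inputs (e.g. already-sorted lists), which would
# otherwise drive a fixed-leftmost-pivot quicksort to O(n^2).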
def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
| 286 |
'''simple docstring'''
import functools
def a_ ( days : list[int] , costs : list[int] ) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
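# Example: days=[1, 4, 6, 7, 8, 20], costs=[2, 7, 15] -> 11
# (a 7-day pass bought on day 1 covers days 1-7, then two 1-day tickets cover
# days 8 and 20).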
if __name__ == "__main__":
import doctest
doctest.testmod()
| 286 | 1 |
'''simple docstring'''
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False


try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.tensor), type(v)
        r[k] = v
    return r
class _A:
"""simple docstring"""
UpperCamelCase : Dict = {}
def __init__( self , _A , _A = "root" , _A=0 ):
__A : Union[str, Any] = name
__A : int = level
__A : Optional[Any] = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__A : Union[str, Any] = copy.deepcopy(_A )
__A : int = copy.deepcopy(_A )
if isinstance(_A , _A ):
__A : Optional[int] = Config(_A , name=_A , level=level + 1 )
__A : List[str] = v
setattr(self , _A , _A )
__A : Optional[Any] = d
def __repr__( self ):
return str(list((self._pointer.keys()) ) )
def __setattr__( self , _A , _A ):
__A : List[Any] = val
__A : Any = val
__A : Tuple = key.split('.' )
__A : List[Any] = len(_A ) - 1
__A : Dict = self._pointer
if len(_A ) > 1:
for i, l in enumerate(_A ):
if hasattr(self , _A ) and isinstance(getattr(self , _A ) , _A ):
setattr(getattr(self , _A ) , '.'.join(levels[i:] ) , _A )
if l == last_level:
__A : Tuple = val
else:
__A : List[Any] = pointer[l]
def UpperCAmelCase_ ( self ):
return self._pointer
def UpperCAmelCase_ ( self , _A , _A ):
with open(F"""{file_name}""" , 'w' ) as stream:
dump(_A , _A )
def UpperCAmelCase_ ( self , _A , _A ):
with open(F"""{file_name}""" , 'w' ) as stream:
json.dump(_A , _A )
@staticmethod
def UpperCAmelCase_ ( _A ):
with open(_A ) as stream:
__A : str = load(_A , Loader=_A )
return data
def __str__( self ):
__A : int = ' '
if self._name != "root":
__A : Any = F"""{t * (self._level-1)}{self._name}:\n"""
else:
__A : Any = ''
__A : List[str] = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(_A , _A ):
r += F"""{t * (self._level)}{v}\n"""
self._level += 1
else:
r += F"""{t * (self._level)}{k}: {v} ({type(_A ).__name__})\n"""
__A : List[str] = level
return r[:-1]
@classmethod
def UpperCAmelCase_ ( cls , _A , **_A ):
__A : Union[str, Any] = cls.get_config_dict(_A , **_A )
return cls(_A )
@classmethod
def UpperCAmelCase_ ( cls , _A , **_A ):
__A : Dict = kwargs.pop('cache_dir' , _A )
__A : Tuple = kwargs.pop('force_download' , _A )
__A : str = kwargs.pop('resume_download' , _A )
__A : List[Any] = kwargs.pop('proxies' , _A )
__A : Dict = kwargs.pop('local_files_only' , _A )
if os.path.isdir(_A ):
__A : int = os.path.join(_A , _A )
elif os.path.isfile(_A ) or is_remote_url(_A ):
__A : int = pretrained_model_name_or_path
else:
__A : List[str] = hf_bucket_url(_A , filename=_A , use_cdn=_A )
try:
# Load from URL or cache if already cached
__A : Union[str, Any] = cached_path(
_A , cache_dir=_A , force_download=_A , proxies=_A , resume_download=_A , local_files_only=_A , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__A : List[str] = Config.load_yaml(_A )
except EnvironmentError:
__A : int = 'Can\'t load config for'
raise EnvironmentError(_A )
if resolved_config_file == config_file:
print('loading configuration file from path' )
else:
print('loading configuration file cache' )
return Config.load_yaml(_A ), kwargs
def _SCREAMING_SNAKE_CASE ( a ) -> Tuple:
__A : List[Any] = torch.load('dump.pt' , map_location=in_tensor.device )
__A : Any = in_tensor.numpy()
__A : Optional[Any] = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(a , a , rtol=0.01 , atol=0.1 ), (
F"""{sum([1 for x in np.isclose(a , a , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*1_00:.4f} %"""
" element-wise mismatch"
)
raise Exception('tensors are all good' )
# Hugging face functions below
def is_remote_url(url_or_filename) -> bool:
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id, filename, use_cdn=True) -> str:
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
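# Example: hf_bucket_url("bert-base-uncased", "config.json") resolves to
# "https://cdn.huggingface.co/bert-base-uncased-config.json" -- the legacy flat
# layout, since the model id contains no namespace slash.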
def _SCREAMING_SNAKE_CASE ( a , a , a=None , a=0 , a=None , ) -> str:
__A : int = 'python/{}'.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(a , a ):
ua += "; " + "; ".join('{}/{}'.format(a , a ) for k, v in user_agent.items() )
elif isinstance(a , a ):
ua += "; " + user_agent
__A : Tuple = {'user-agent': ua}
if resume_size > 0:
__A : str = 'bytes=%d-' % (resume_size,)
__A : List[Any] = requests.get(a , stream=a , proxies=a , headers=a )
if response.status_code == 4_16: # Range not satisfiable
return
__A : List[str] = response.headers.get('Content-Length' )
__A : Optional[int] = resume_size + int(a ) if content_length is not None else None
__A : int = tqdm(
unit='B' , unit_scale=a , total=a , initial=a , desc='Downloading' , )
for chunk in response.iter_content(chunk_size=10_24 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(a ) )
temp_file.write(a )
progress.close()
def _SCREAMING_SNAKE_CASE ( a , a=None , a=False , a=None , a=10 , a=False , a=None , a=False , ) -> Optional[Any]:
if cache_dir is None:
__A : Tuple = TRANSFORMERS_CACHE
if isinstance(a , a ):
__A : Dict = str(a )
os.makedirs(a , exist_ok=a )
__A : int = None
if not local_files_only:
try:
__A : Dict = requests.head(a , allow_redirects=a , proxies=a , timeout=a )
if response.status_code == 2_00:
__A : List[Any] = response.headers.get('ETag' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__A : Any = url_to_filename(a , a )
# get cache path to put the file
__A : List[str] = os.path.join(a , a )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(a ):
return cache_path
else:
__A : str = [
file
for file in fnmatch.filter(os.listdir(a ) , filename + '.*' )
if not file.endswith('.json' ) and not file.endswith('.lock' )
]
if len(a ) > 0:
return os.path.join(a , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'Cannot find the requested files in the cached path and outgoing traffic has been'
' disabled. To enable model look-ups and downloads online, set \'local_files_only\''
' to False.' )
return None
# From now on, etag is not None.
if os.path.exists(a ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__A : List[Any] = cache_path + '.lock'
with FileLock(a ):
# If the download just completed while the lock was activated.
if os.path.exists(a ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__A : Union[str, Any] = cache_path + '.incomplete'
@contextmanager
def _resumable_file_manager():
with open(a , 'a+b' ) as f:
yield f
__A : str = _resumable_file_manager
if os.path.exists(a ):
__A : List[str] = os.stat(a ).st_size
else:
__A : str = 0
else:
__A : List[Any] = partial(tempfile.NamedTemporaryFile , dir=a , delete=a )
__A : Union[str, Any] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'%s not found in cache or force_download set to True, downloading to %s' , a , temp_file.name , )
http_get(
a , a , proxies=a , resume_size=a , user_agent=a , )
os.replace(temp_file.name , a )
__A : Dict = {'url': url, 'etag': etag}
__A : str = cache_path + '.json'
with open(a , 'w' ) as meta_file:
json.dump(a , a )
return cache_path
def _SCREAMING_SNAKE_CASE ( a , a=None ) -> Optional[Any]:
__A : str = url.encode('utf-8' )
__A : Union[str, Any] = shaaaa(a )
__A : Optional[int] = url_hash.hexdigest()
if etag:
__A : Tuple = etag.encode('utf-8' )
__A : List[Any] = shaaaa(a )
filename += "." + etag_hash.hexdigest()
if url.endswith('.h5' ):
filename += ".h5"
return filename
def _SCREAMING_SNAKE_CASE ( a , a=None , a=False , a=None , a=False , a=None , a=False , a=False , a=False , ) -> Optional[Any]:
if cache_dir is None:
__A : str = TRANSFORMERS_CACHE
if isinstance(a , a ):
__A : List[str] = str(a )
if isinstance(a , a ):
__A : List[Any] = str(a )
if is_remote_url(a ):
# URL, so get it from the cache (downloading if necessary)
__A : Optional[Any] = get_from_cache(
a , cache_dir=a , force_download=a , proxies=a , resume_download=a , user_agent=a , local_files_only=a , )
elif os.path.exists(a ):
# File, and it exists.
__A : List[str] = url_or_filename
elif urlparse(a ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('file {} not found'.format(a ) )
else:
# Something unknown
raise ValueError('unable to parse {} as a URL or as a local path'.format(a ) )
if extract_compressed_file:
if not is_zipfile(a ) and not tarfile.is_tarfile(a ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__A : Optional[int] = os.path.split(a )
__A : Union[str, Any] = output_file.replace('.' , '-' ) + '-extracted'
__A : Dict = os.path.join(a , a )
if os.path.isdir(a ) and os.listdir(a ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__A : List[Any] = output_path + '.lock'
with FileLock(a ):
shutil.rmtree(a , ignore_errors=a )
os.makedirs(a )
if is_zipfile(a ):
with ZipFile(a , 'r' ) as zip_file:
zip_file.extractall(a )
zip_file.close()
elif tarfile.is_tarfile(a ):
__A : int = tarfile.open(a )
tar_file.extractall(a )
tar_file.close()
else:
raise EnvironmentError('Archive format of {} could not be identified'.format(a ) )
return output_path_extracted
return output_path
def _SCREAMING_SNAKE_CASE ( a , a="," ) -> List[str]:
assert isinstance(a , a )
if os.path.isfile(a ):
with open(a ) as f:
__A : Optional[int] = eval(f.read() )
else:
__A : List[Any] = requests.get(a )
try:
__A : Tuple = requests.json()
except Exception:
__A : str = req.content.decode()
assert data is not None, "could not connect"
try:
__A : int = eval(a )
except Exception:
__A : int = data.split('\n' )
req.close()
return data
def _SCREAMING_SNAKE_CASE ( a ) -> int:
__A : List[str] = requests.get(a )
__A : Any = np.array(Image.open(BytesIO(response.content ) ) )
return img
def _SCREAMING_SNAKE_CASE ( a ) -> Union[str, Any]:
__A : Dict = url.split('/' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(a )
with open(a , 'rb' ) as stream:
__A : Any = pkl.load(a )
__A : Dict = weights.pop('model' )
__A : Any = {}
for k, v in model.items():
__A : int = torch.from_numpy(a )
if "running_var" in k:
__A : Union[str, Any] = torch.tensor([0] )
__A : Optional[Any] = k.replace('running_var' , 'num_batches_tracked' )
__A : int = zero
return new
def _SCREAMING_SNAKE_CASE ( ) -> List[str]:
print(F"""{os.path.abspath(os.path.join(a , os.pardir ) )}/demo.ipynb""" )
def _SCREAMING_SNAKE_CASE ( a , a="RGB" ) -> Tuple:
assert isinstance(a , a )
if os.path.isfile(a ):
__A : Any = cva.imread(a )
else:
__A : List[str] = get_image_from_url(a )
assert img is not None, F"""could not connect to: {im}"""
__A : Any = cva.cvtColor(a , cva.COLOR_BGR2RGB )
if input_format == "RGB":
__A : Union[str, Any] = img[:, :, ::-1]
return img
def _SCREAMING_SNAKE_CASE ( a , a=1 ) -> Dict:
return (images[i : i + batch] for i in range(0 , len(a ) , a ))
| 700 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset( IterableDataset ):
    """simple docstring"""

    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element
def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator, dataset_size, batch_size, iterable=False):
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))

    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl


def verify_dataloader_batch_sizes(
    accelerator,
    dataset_size,
    batch_size,
    process_0_expected_batch_sizes,
    process_1_expected_batch_sizes,
):
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )


def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    ddp_model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(ddp_model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)


def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    ddp_model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(ddp_model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    ddp_model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(ddp_model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches


def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    ddp_model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(ddp_model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)
def main():
    accelerator = create_accelerator()

    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()

    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()

    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()

    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()

    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
main()
| 77 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0

CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}


class lowercase ( PreTrainedTokenizer ):
    """simple docstring"""

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self._unicode_vocab_size
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
return list(UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
try:
return ord(UpperCamelCase_ )
except TypeError:
raise ValueError(F'''invalid token: \'{token}\'''' )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(UpperCamelCase_ )
except TypeError:
raise ValueError(F'''invalid id: {index}''' )
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
return "".join(UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
UpperCamelCase__ :Any = [self.sep_token_id]
UpperCamelCase__ :Tuple = [self.cls_token_id]
UpperCamelCase__ :Any = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
UpperCamelCase__ :Any = [1] + ([0] * len(UpperCamelCase_ )) + [1]
if token_ids_a is not None:
result += ([0] * len(UpperCamelCase_ )) + [1]
return result
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
UpperCamelCase__ :int = [self.sep_token_id]
UpperCamelCase__ :Any = [self.cls_token_id]
UpperCamelCase__ :str = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
return ()
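# A minimal usage sketch (not part of the original file): CANINE tokenizes at
# the character level, so no vocabulary file is needed and every character
# maps to its own Unicode codepoint id.
if __name__ == "__main__":
    tokenizer = CanineTokenizer()
    encoding = tokenizer("hello")
    # Expected ids: [CLS] + raw codepoints + [SEP], i.e.
    # [0xE000, ord("h"), ord("e"), ord("l"), ord("l"), ord("o"), 0xE001]
    print(encoding["input_ids"])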
| 189 |
"""A minimal, fully working training example built on Hugging Face Accelerate."""
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
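# A hedged usage note (the script file name below is an assumption, it is not
# given by this source):
#
#   python nlp_example.py --mixed_precision fp16      # single process
#   accelerate launch nlp_example.py                  # distributed, after running `accelerate config`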
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates the train/validation `DataLoader`s for GLUE MRPC, tokenized with bert-base-cased."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset,
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels', which is the expected name for labels by the models of the
    # transformers library.
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16.
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize the accelerator.
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs.
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation: e.g. a requested
    # batch size of 64 becomes 4 accumulation steps of 16 samples each.
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization).
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation, otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer.
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler.
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything. There is no specific order to remember: we just need to unpack the objects in the same
    # order we gave them to the prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model.
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 189 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(self, vocab_size=50277, context_length=1024, hidden_size=4096, num_hidden_layers=32, attention_hidden_size=None, intermediate_size=None, layer_norm_epsilon=1e-5, bos_token_id=0, eos_token_id=0, rescale_every=6, tie_word_embeddings=False, use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Fall back to the model dimension when the attention size is not set explicitly.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        # The feed-forward layer defaults to the usual 4x expansion.
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
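# A minimal usage sketch (not part of the original file), showing the derived
# defaults for the attention and feed-forward sizes:
if __name__ == "__main__":
    config = RwkvConfig(hidden_size=768, num_hidden_layers=12)
    assert config.attention_hidden_size == 768   # defaults to hidden_size
    assert config.intermediate_size == 4 * 768   # defaults to 4 * hidden_size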
| 712 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Sort comments from newest to oldest so the first one is the most recent.
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")
if __name__ == "__main__":
main()
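# Usage note (a sketch; the invocation and schedule are assumptions, not stated
# in the source): the script expects a GITHUB_TOKEN with repo access and is
# typically run from a scheduled CI job, e.g.:
#
#   GITHUB_TOKEN=<personal-access-token> python utils/stale.py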
| 470 | 0 |