"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images from random uint8 NumPy arrays of shape (C, H, W)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
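# Illustrative usage sketch (hedged; not part of the test suite above; `tmpdir` and `images`
# are placeholders for the fixtures created in setUp):
#
#   processor = CLIPProcessor(
#       tokenizer=CLIPTokenizer.from_pretrained(tmpdir),
#       image_processor=CLIPImageProcessor.from_pretrained(tmpdir),
#   )
#   batch = processor(text="lower newer", images=images, return_tensors="np")
#   # `batch` holds "input_ids", "attention_mask" and "pixel_values", as the tests above assert.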
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadV1Processor,
SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTOTUNE = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!")
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.")
    parser.add_argument(
        "--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.")
    parser.add_argument(
        "--no_tpu", action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.")
    parser.add_argument(
        "--tpu_name", type=str, default="local",
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.")
    parser.add_argument(
        "--tpu_zone", type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.")
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes.")
    parser.add_argument(
        "--bfloat16", action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.")
    parser.add_argument(
        "--train_dataset", type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.")
    parser.add_argument(
        "--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)")
    parser.add_argument(
        "--eval_dataset", type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.")
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of epochs to train for.")
    parser.add_argument(
        "--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.")
    parser.add_argument(
        "--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.")
    parser.add_argument(
        "--max_length", type=int, default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py")
    parser.add_argument(
        "--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.")
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    # Sum the per-shard sample counts encoded in the .tfrecord file names.
    num_samples = 0
    for file in file_list:
        file_name = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", file_name).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples
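# Worked example for count_samples, assuming the shard-naming convention the regex above
# expects: ["train-00-4096.tfrecord", "train-01-512.tfrecord"] -> 4096 + 512 = 4608, i.e. the
# trailing integer in each "-<shard>-<count>.tfrecord" file name is summed.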
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTOTUNE)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTOTUNE)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTOTUNE)
    dataset = dataset.prefetch(AUTOTUNE)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )
    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
    main(args)
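# Example invocation (hedged; the script file name and bucket paths are placeholders):
#   python train_mlm_on_tpu.py --train_dataset gs://my-bucket/train --eval_dataset gs://my-bucket/eval \
#       --output_dir ./mlm-checkpoints --bfloat16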
"""simple docstring"""
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
_DESCRIPTION = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
_KWARGS_DESCRIPTION = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """Computes F1 score and Exact Match for MultiRC predictions."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel
if is_vision_available():
from transformers import Mask2FormerImageProcessor
if is_vision_available():
from PIL import Image
class Mask2FormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_mask2former_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        EXPECTED_SLICE = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], EXPECTED_SLICE, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        EXPECTED_SLICE = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], EXPECTED_SLICE, atol=1e-4))
"""simple docstring"""
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
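# Minimal usage sketch (hedged; the argument values are illustrative, not tuned defaults):
#
#   config = AutoformerConfig(prediction_length=24, context_length=48)
#   # model-facing names resolve through attribute_map, e.g. config.hidden_size -> config.d_model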
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
__lowerCAmelCase : Optional[Any] = parser.parse_args()
__lowerCAmelCase : Dict = '''cpu'''
__lowerCAmelCase : Optional[Any] = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
__lowerCAmelCase : Tuple = '''path-to-your-trained-model'''
__lowerCAmelCase : List[Any] = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
__lowerCAmelCase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
__lowerCAmelCase : List[Any] = pipe.to(device)
# to channels last
__lowerCAmelCase : Optional[Any] = pipe.unet.to(memory_format=torch.channels_last)
__lowerCAmelCase : List[str] = pipe.vae.to(memory_format=torch.channels_last)
__lowerCAmelCase : Optional[Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
__lowerCAmelCase : Dict = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
__lowerCAmelCase : Tuple = torch.randn(2, 4, 64, 64)
__lowerCAmelCase : Any = torch.rand(1) * 999
__lowerCAmelCase : List[str] = torch.randn(2, 77, 768)
__lowerCAmelCase : Optional[int] = (sample, timestep, encoder_hidden_status)
try:
__lowerCAmelCase : List[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
__lowerCAmelCase : Optional[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
__lowerCAmelCase : Any = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
__lowerCAmelCase : int = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
__lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
__lowerCAmelCase : List[str] = 666
__lowerCAmelCase : Optional[int] = torch.Generator(device).manual_seed(seed)
__lowerCAmelCase : List[Any] = {'''generator''': generator}
if args.steps is not None:
__lowerCAmelCase : Any = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
__lowerCAmelCase : str = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
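# Example invocation (hedged; the script file name is a placeholder):
#   python stable_diffusion_ipex_inference.py --dpm --steps 20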
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : int=True , __UpperCamelCase : Union[str, Any]="pt" ):
'''simple docstring'''
snake_case_ : Union[str, Any] = {"""add_prefix_space""": True} if isinstance(__UpperCamelCase , __UpperCamelCase ) and not line.startswith(""" """ ) else {}
snake_case_ : Any = padding_side
return tokenizer(
[line] , max_length=__UpperCamelCase , padding="""max_length""" if pad_to_max_length else None , truncation=__UpperCamelCase , return_tensors=__UpperCamelCase , add_special_tokens=__UpperCamelCase , **__UpperCamelCase , )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : Any , __UpperCamelCase : Optional[int]=None , ):
'''simple docstring'''
snake_case_ : List[Any] = input_ids.ne(__UpperCamelCase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
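# Illustrative behaviour (editorial; `trim_batch` is the original name, pad id 0 assumed):
#
#   ids = torch.tensor([[5, 6, 0, 0],
#                       [7, 0, 0, 0]])
#   trim_batch(ids, 0)   # -> tensor([[5, 6], [7, 0]])
#
# columns that are padding in every row are dropped, shortening the whole batch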
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase="train" , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase="" , ) -> Dict:
'''simple docstring'''
super().__init__()
snake_case_ : List[str] = Path(_lowercase ).joinpath(type_path + """.source""" )
snake_case_ : Dict = Path(_lowercase ).joinpath(type_path + """.target""" )
snake_case_ : Optional[int] = self.get_char_lens(self.src_file )
snake_case_ : str = max_source_length
snake_case_ : List[Any] = max_target_length
assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}'
snake_case_ : List[Any] = tokenizer
snake_case_ : Dict = prefix
if n_obs is not None:
snake_case_ : Dict = self.src_lens[:n_obs]
snake_case_ : List[Any] = src_lang
snake_case_ : Optional[int] = tgt_lang
def __len__( self ) -> Any:
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self , _lowercase ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
snake_case_ : Optional[Any] = index + 1 # linecache starts at 1
snake_case_ : Optional[int] = self.prefix + linecache.getline(str(self.src_file ) , _lowercase ).rstrip("""\n""" )
snake_case_ : Union[str, Any] = linecache.getline(str(self.tgt_file ) , _lowercase ).rstrip("""\n""" )
assert source_line, f'empty source line for index {index}'
assert tgt_line, f'empty tgt line for index {index}'
# Need to add eos token manually for T5
if isinstance(self.tokenizer , _lowercase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
snake_case_ : Optional[Any] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , _lowercase ) else self.tokenizer
)
snake_case_ : Dict = self.tokenizer.generator if isinstance(self.tokenizer , _lowercase ) else self.tokenizer
snake_case_ : str = encode_line(_lowercase , _lowercase , self.max_source_length , """right""" )
snake_case_ : Dict = encode_line(_lowercase , _lowercase , self.max_target_length , """right""" )
snake_case_ : Optional[int] = source_inputs["""input_ids"""].squeeze()
snake_case_ : Optional[Any] = target_inputs["""input_ids"""].squeeze()
snake_case_ : Optional[int] = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def UpperCAmelCase__ ( _lowercase ) -> List[str]:
'''simple docstring'''
return [len(_lowercase ) for x in Path(_lowercase ).open().readlines()]
def UpperCAmelCase__ ( self , _lowercase ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
snake_case_ : List[Any] = torch.stack([x["""input_ids"""] for x in batch] )
snake_case_ : Optional[Any] = torch.stack([x["""attention_mask"""] for x in batch] )
snake_case_ : Optional[Any] = torch.stack([x["""decoder_input_ids"""] for x in batch] )
snake_case_ : Union[str, Any] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , _lowercase )
else self.tokenizer.pad_token_id
)
snake_case_ : Any = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , _lowercase )
else self.tokenizer.pad_token_id
)
snake_case_ : Union[str, Any] = trim_batch(_lowercase , _lowercase )
snake_case_ : Dict = trim_batch(_lowercase , _lowercase , attention_mask=_lowercase )
snake_case_ : Union[str, Any] = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
__lowerCAmelCase : List[Any] = getLogger(__name__)
def __lowerCAmelCase ( __UpperCamelCase : List[List] ):
'''simple docstring'''
return list(itertools.chain.from_iterable(__UpperCamelCase ) )
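# Illustrative (editorial; original name `flatten_list` assumed): flattens one level
# of nesting, e.g. [[1, 2], [3]] -> [1, 2, 3]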
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : Any = get_git_info()
save_json(__UpperCamelCase , os.path.join(__UpperCamelCase , """git_log.json""" ) )
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : int , __UpperCamelCase : List[Any]=4 , **__UpperCamelCase : Optional[Any] ):
'''simple docstring'''
with open(__UpperCamelCase , """w""" ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase , indent=__UpperCamelCase , **__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
with open(__UpperCamelCase ) as f:
return json.load(__UpperCamelCase )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Any = git.Repo(search_parent_directories=__UpperCamelCase )
snake_case_ : Dict = {
"""repo_id""": str(__UpperCamelCase ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
"""hostname""": str(socket.gethostname() ),
}
return repo_infos
def __lowerCAmelCase ( __UpperCamelCase : Callable , __UpperCamelCase : Iterable ):
'''simple docstring'''
return list(map(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
with open(__UpperCamelCase , """wb""" ) as f:
return pickle.dump(__UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
def remove_articles(__UpperCamelCase : List[str] ):
return re.sub(r"""\b(a|an|the)\b""" , """ """ , __UpperCamelCase )
def white_space_fix(__UpperCamelCase : Union[str, Any] ):
return " ".join(text.split() )
def remove_punc(__UpperCamelCase : Optional[int] ):
snake_case_ : List[str] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__UpperCamelCase : Optional[int] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__UpperCamelCase ) ) ) )
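# Illustrative (editorial; SQuAD-style normalisation, original name `normalize_answer`):
#
#   normalize_answer("The Quick, Brown Fox!")   # -> "quick brown fox"
#
# i.e. lowercase, strip punctuation, drop the articles a/an/the, collapse whitespace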
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : Optional[int] = normalize_answer(__UpperCamelCase ).split()
snake_case_ : Optional[int] = normalize_answer(__UpperCamelCase ).split()
snake_case_ : int = Counter(__UpperCamelCase ) & Counter(__UpperCamelCase )
snake_case_ : str = sum(common.values() )
if num_same == 0:
return 0
snake_case_ : Optional[Any] = 1.0 * num_same / len(__UpperCamelCase )
snake_case_ : Tuple = 1.0 * num_same / len(__UpperCamelCase )
snake_case_ : Tuple = (2 * precision * recall) / (precision + recall)
return fa
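# Worked example (editorial; original name `f1_score`): prediction "new york city"
# vs. gold "york city" share 2 tokens, so precision = 2/3, recall = 2/2 = 1 and
# f1 = 2 * (2/3) * 1 / (2/3 + 1) = 0.8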
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
return normalize_answer(__UpperCamelCase ) == normalize_answer(__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : List[str] ):
'''simple docstring'''
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
snake_case_ : Optional[Any] = 0
for hypo, pred in zip(__UpperCamelCase , __UpperCamelCase ):
em += exact_match_score(__UpperCamelCase , __UpperCamelCase )
if len(__UpperCamelCase ) > 0:
em /= len(__UpperCamelCase )
return {"em": em}
def __lowerCAmelCase ( __UpperCamelCase : Tuple ):
'''simple docstring'''
return model_prefix.startswith("""rag""" )
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Optional[Any] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
snake_case_ : Optional[int] = """dropout_rate"""
for p in extra_params:
if getattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
if not hasattr(__UpperCamelCase , __UpperCamelCase ) and not hasattr(__UpperCamelCase , equivalent_param[p] ):
logger.info("""config doesn't have a `{}` attribute""".format(__UpperCamelCase ) )
delattr(__UpperCamelCase , __UpperCamelCase )
continue
snake_case_ : int = p if hasattr(__UpperCamelCase , __UpperCamelCase ) else equivalent_param[p]
setattr(__UpperCamelCase , __UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
delattr(__UpperCamelCase , __UpperCamelCase )
return hparams, config
| 713 |
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = RoFormerTokenizer
_lowerCamelCase = RoFormerTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = True
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().setUp()
def UpperCAmelCase__ ( self , **_lowercase ) -> str:
'''simple docstring'''
return self.tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **_lowercase )
def UpperCAmelCase__ ( self , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **_lowercase )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : Tuple = """永和服装饰品有限公司,今天天气非常好"""
snake_case_ : int = """永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"""
return input_text, output_text
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : List[str] = self.get_tokenizer()
snake_case_ , snake_case_ : Optional[Any] = self.get_chinese_input_output_texts()
snake_case_ : List[str] = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , output_text.split() )
snake_case_ : str = tokens + [tokenizer.unk_token]
snake_case_ : Tuple = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : int = self.get_rust_tokenizer()
snake_case_ , snake_case_ : List[Any] = self.get_chinese_input_output_texts()
snake_case_ : Union[str, Any] = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , output_text.split() )
snake_case_ : Optional[int] = tokens + [tokenizer.unk_token]
snake_case_ : Union[str, Any] = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
| 21 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = CanineTokenizer
_lowerCamelCase = False
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
snake_case_ : List[str] = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
return CanineTokenizer.from_pretrained("""google/canine-s""" )
def UpperCAmelCase__ ( self , **_lowercase ) -> CanineTokenizer:
'''simple docstring'''
snake_case_ : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowercase )
snake_case_ : Any = 1_0_2_4
return tokenizer
@require_torch
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = self.canine_tokenizer
snake_case_ : Any = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""]
# fmt: off
snake_case_ : str = [5_7_3_4_4, 7_6, 1_0_5, 1_0_2, 1_0_1, 3_2, 1_0_5, 1_1_5, 3_2, 1_0_8, 1_0_5, 1_0_7, 1_0_1, 3_2, 9_7, 3_2, 9_8, 1_1_1, 1_2_0, 3_2, 1_1_1, 1_0_2, 3_2, 9_9, 1_0_4, 1_1_1, 9_9, 1_1_1, 1_0_8, 9_7, 1_1_6, 1_0_1, 1_1_5, 4_6, 5_7_3_4_5, 0, 0, 0, 0]
# fmt: on
snake_case_ : str = tokenizer(_lowercase , padding=_lowercase , return_tensors="""pt""" )
self.assertIsInstance(_lowercase , _lowercase )
snake_case_ : Tuple = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_lowercase , _lowercase )
self.assertEqual((2, 3_9) , batch.input_ids.shape )
self.assertEqual((2, 3_9) , batch.attention_mask.shape )
@require_torch
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = self.canine_tokenizer
snake_case_ : Dict = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""]
snake_case_ : str = tokenizer(_lowercase , padding=_lowercase , return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" , _lowercase )
self.assertIn("""attention_mask""" , _lowercase )
self.assertIn("""token_type_ids""" , _lowercase )
@require_torch
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = self.canine_tokenizer
snake_case_ : List[Any] = [
"""What's the weater?""",
"""It's about 25 degrees.""",
]
snake_case_ : Union[str, Any] = tokenizer(
text_target=_lowercase , max_length=3_2 , padding="""max_length""" , truncation=_lowercase , return_tensors="""pt""" )
self.assertEqual(3_2 , targets["""input_ids"""].shape[1] )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
snake_case_ : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
snake_case_ : Any = tempfile.mkdtemp()
snake_case_ : List[Any] = """ He is very happy, UNwant\u00E9d,running"""
snake_case_ : str = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
tokenizer.save_pretrained(_lowercase )
snake_case_ : int = tokenizer.__class__.from_pretrained(_lowercase )
snake_case_ : int = after_tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
shutil.rmtree(_lowercase )
snake_case_ : List[str] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
snake_case_ : Tuple = tempfile.mkdtemp()
snake_case_ : Any = """ He is very happy, UNwant\u00E9d,running"""
snake_case_ : Union[str, Any] = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
snake_case_ : Tuple = chr(0Xe0_07 )
additional_special_tokens.append(_lowercase )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
snake_case_ : Tuple = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
tokenizer.save_pretrained(_lowercase )
snake_case_ : Optional[Any] = tokenizer.__class__.from_pretrained(_lowercase )
snake_case_ : Any = after_tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
self.assertIn(_lowercase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
snake_case_ : str = tokenizer.__class__.from_pretrained(_lowercase , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(_lowercase )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = self.get_tokenizers(do_lower_case=_lowercase )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
snake_case_ : int = self.get_clean_sequence(_lowercase )
# a special token for Canine can be defined as follows:
snake_case_ : int = 0Xe0_05
snake_case_ : Dict = chr(_lowercase )
tokenizer.add_special_tokens({"""cls_token""": special_token} )
snake_case_ : Dict = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertEqual(len(_lowercase ) , 1 )
snake_case_ : List[str] = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=_lowercase )
snake_case_ : int = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
snake_case_ : List[Any] = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
snake_case_ : Any = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertEqual(_lowercase , input_encoded + special_token_id )
snake_case_ : int = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
self.assertTrue(special_token not in decoded )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : str = self.get_tokenizers(do_lower_case=_lowercase )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
snake_case_ : Any = chr(0Xe0_05 )
snake_case_ : Optional[int] = chr(0Xe0_06 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=_lowercase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
snake_case_ : Tuple = tokenizer.tokenize(_lowercase )
snake_case_ : Optional[Any] = tokenizer.tokenize(_lowercase )
self.assertEqual(len(_lowercase ) , 1 )
self.assertEqual(len(_lowercase ) , 1 )
self.assertEqual(token_a[0] , _lowercase )
self.assertEqual(token_a[0] , _lowercase )
@require_tokenizers
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : List[Any] = self.get_tokenizers(do_lower_case=_lowercase )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# a special token for Canine can be defined as follows:
snake_case_ : Tuple = 0Xe0_06
snake_case_ : Any = chr(_lowercase )
snake_case_ : List[Any] = AddedToken(_lowercase , lstrip=_lowercase )
tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(_lowercase )
tokenizer.from_pretrained(_lowercase )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Any = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowercase )
with open(os.path.join(_lowercase , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
snake_case_ : Optional[int] = json.load(_lowercase )
with open(os.path.join(_lowercase , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
snake_case_ : str = json.load(_lowercase )
# a special token for Canine can be defined as follows:
snake_case_ : Tuple = 0Xe0_06
snake_case_ : List[Any] = chr(_lowercase )
snake_case_ : List[Any] = [new_token_a]
snake_case_ : List[str] = [new_token_a]
with open(os.path.join(_lowercase , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(_lowercase , _lowercase )
with open(os.path.join(_lowercase , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(_lowercase , _lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
snake_case_ : Any = tokenizer_class.from_pretrained(_lowercase , extra_ids=0 )
self.assertIn(_lowercase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
snake_case_ : Any = 0Xe0_07
snake_case_ : int = chr(_lowercase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
snake_case_ : Tuple = [AddedToken(_lowercase , lstrip=_lowercase )]
snake_case_ : Dict = tokenizer_class.from_pretrained(
_lowercase , additional_special_tokens=_lowercase , extra_ids=0 )
self.assertIn(_lowercase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : int = self.get_tokenizers(do_lower_case=_lowercase )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
snake_case_ : Optional[Any] = """hello world"""
if self.space_between_special_tokens:
snake_case_ : str = """[CLS] hello world [SEP]"""
else:
snake_case_ : List[Any] = input
snake_case_ : str = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
snake_case_ : Union[str, Any] = tokenizer.decode(_lowercase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(_lowercase , [output, output.lower()] )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
snake_case_ : Optional[int] = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
snake_case_ : int = """a"""
snake_case_ : Dict = ord(_lowercase )
for attr in attributes_list:
setattr(_lowercase , attr + """_id""" , _lowercase )
self.assertEqual(getattr(_lowercase , _lowercase ) , _lowercase )
self.assertEqual(getattr(_lowercase , attr + """_id""" ) , _lowercase )
setattr(_lowercase , attr + """_id""" , _lowercase )
self.assertEqual(getattr(_lowercase , _lowercase ) , _lowercase )
self.assertEqual(getattr(_lowercase , attr + """_id""" ) , _lowercase )
setattr(_lowercase , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(_lowercase , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(_lowercase , """additional_special_tokens_ids""" ) , [] )
snake_case_ : Dict = 0Xe0_06
snake_case_ : Dict = chr(_lowercase )
setattr(_lowercase , """additional_special_tokens_ids""" , [additional_special_token_id] )
self.assertListEqual(getattr(_lowercase , """additional_special_tokens""" ) , [additional_special_token] )
self.assertListEqual(getattr(_lowercase , """additional_special_tokens_ids""" ) , [additional_special_token_id] )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
pass
| 714 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 1_0 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
snake_case_ : List[Any] = [
2_0_4_7,
1_3_7_3_6_5_3,
2_5_3_2_6_0_0_1,
3_2_1_5_0_3_1_7_5_1,
2_1_5_2_3_0_2_8_9_8_7_4_7,
3_4_7_4_7_4_9_6_6_0_3_8_3,
3_4_1_5_5_0_0_7_1_7_2_8_3_2_1,
1,
3_8_2_5_1_2_3_0_5_6_5_4_6_4_1_3_0_5_1,
1,
1,
3_1_8_6_6_5_8_5_7_8_3_4_0_3_1_1_5_1_1_6_7_4_6_1,
3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1,
]
snake_case_ : Dict = [2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9, 3_1, 3_7, 4_1]
for idx, _p in enumerate(__UpperCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
snake_case_ : Optional[int] = primes[:idx]
break
snake_case_ , snake_case_ : Tuple = n - 1, 0
# break n - 1 up into a power of 2 (s) and a
# remaining odd component (d)
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
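# worked example (editorial): for n = 221, n - 1 = 220 = 55 * 2**2, so d = 55 and s = 2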
for prime in plist:
snake_case_ : List[str] = False
for r in range(__UpperCamelCase ):
snake_case_ : int = pow(__UpperCamelCase , d * 2**r , __UpperCamelCase )
# see the article for the analysis explaining m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
snake_case_ : Optional[Any] = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and n MUST be composite
return False
return True
def __lowerCAmelCase ( ):
'''simple docstring'''
assert not miller_rabin(5_6_1 )
assert miller_rabin(5_6_3 )
# 2047
assert not miller_rabin(8_3_8_2_0_1 )
assert miller_rabin(8_3_8_2_0_7 )
# 1_373_653
assert not miller_rabin(1_7_3_1_6_0_0_1 )
assert miller_rabin(1_7_3_1_6_0_1_7 )
# 25_326_001
assert not miller_rabin(3_0_7_8_3_8_6_6_4_1 )
assert miller_rabin(3_0_7_8_3_8_6_6_5_3 )
# 3_215_031_751
assert not miller_rabin(1_7_1_3_0_4_5_5_7_4_8_0_1 )
assert miller_rabin(1_7_1_3_0_4_5_5_7_4_8_1_9 )
# 2_152_302_898_747
assert not miller_rabin(2_7_7_9_7_9_9_7_2_8_3_0_7 )
assert miller_rabin(2_7_7_9_7_9_9_7_2_8_3_2_7 )
# 3_474_749_660_383
assert not miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_4_4_1 )
assert miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_5_2_7 )
# 341_550_071_728_321
assert not miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_5_1 )
assert miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_9_1 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_8_6_7 )
assert miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_9_5_1 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_3_3 )
assert miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_5_9 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 21 | 0 |
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self ) -> str:
'''simple docstring'''
super().__init__()
snake_case_ : str = nn.Linear(3 , 4 )
snake_case_ : Tuple = nn.BatchNormad(4 )
snake_case_ : str = nn.Linear(4 , 5 )
def UpperCAmelCase__ ( self , _lowercase ) -> Any:
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(_lowercase ) ) )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def UpperCAmelCase__ ( self , _lowercase , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
return (args[0] + 1,) + args[1:], kwargs
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
return output + 1
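# Editorial sketch (not part of the original test-suite): how the hooks above behave on
# a plain module in the un-obfuscated accelerate API. Note that attaching a second hook
# without append=True *replaces* the first one rather than chaining it.
def _hook_demo_sketch():
    model = nn.Linear(3, 3)
    x = torch.randn(2, 3)
    add_hook_to_module(model, PreForwardHook())   # forward now sees x + 1
    add_hook_to_module(model, PostForwardHook())  # replaces the pre-forward hook
    y = model(x)                                  # the linear output, shifted by +1
    remove_hook_from_module(model)                # restores the plain forward
    return y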
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : Tuple = ModelForTest()
snake_case_ : Union[str, Any] = ModelHook()
add_hook_to_module(_lowercase , _lowercase )
self.assertEqual(test_model._hf_hook , _lowercase )
self.assertTrue(hasattr(_lowercase , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(_lowercase )
self.assertFalse(hasattr(_lowercase , """_hf_hook""" ) )
self.assertFalse(hasattr(_lowercase , """_old_forward""" ) )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : str = ModelForTest()
snake_case_ : List[Any] = ModelHook()
add_hook_to_module(_lowercase , _lowercase )
add_hook_to_module(_lowercase , _lowercase , append=_lowercase )
self.assertEqual(isinstance(test_model._hf_hook , _lowercase ) , _lowercase )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(_lowercase , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(_lowercase )
self.assertFalse(hasattr(_lowercase , """_hf_hook""" ) )
self.assertFalse(hasattr(_lowercase , """_old_forward""" ) )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = ModelForTest()
snake_case_ : List[Any] = torch.randn(2 , 3 )
snake_case_ : List[Any] = test_model(x + 1 )
snake_case_ : Tuple = test_model(x + 2 )
snake_case_ : List[str] = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase )
snake_case_ : Dict = test_model(_lowercase )
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5 ) )
# Attaching a hook to a model that already has one replaces it; hooks do not chain
snake_case_ : List[Any] = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase )
snake_case_ : Tuple = test_model(_lowercase )
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
snake_case_ : Any = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(_lowercase , _lowercase )
snake_case_ : List[Any] = test_model(_lowercase )
assert torch.allclose(_lowercase , _lowercase , atol=1E-5 )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : Tuple = ModelForTest()
snake_case_ : Union[str, Any] = torch.randn(2 , 3 )
snake_case_ : Optional[Any] = test_model(_lowercase )
snake_case_ : int = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase )
snake_case_ : Tuple = test_model(_lowercase )
self.assertTrue(torch.allclose(_lowercase , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model that already has one replaces it; hooks do not chain
snake_case_ : Dict = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase )
snake_case_ : Dict = test_model(_lowercase )
self.assertTrue(torch.allclose(_lowercase , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
snake_case_ : List[str] = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(_lowercase , _lowercase )
snake_case_ : str = test_model(_lowercase )
assert torch.allclose(_lowercase , output + 2 , atol=1E-5 )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Tuple = ModelForTest()
snake_case_ : Dict = torch.randn(2 , 3 )
snake_case_ : Optional[int] = test_model(_lowercase )
snake_case_ : List[str] = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase )
snake_case_ : Tuple = test_model(_lowercase )
self.assertTrue(torch.allclose(_lowercase , output + 1 ) )
self.assertTrue(outputa.requires_grad )
snake_case_ : Optional[int] = True
snake_case_ : List[Any] = test_model(_lowercase )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule to a different device
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
snake_case_ : Tuple = torch.randn(2 , 3 )
snake_case_ : Optional[Any] = model(_lowercase )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(_lowercase , AlignDevicesHook(io_same_device=_lowercase ) )
snake_case_ : List[Any] = torch.randn(2 , 3 ).to(0 )
snake_case_ : Tuple = model(_lowercase )
self.assertEqual(output.device , torch.device(0 ) )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will offload the weights of each submodule
snake_case_ : Optional[Any] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowercase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
snake_case_ : Union[str, Any] = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , _lowercase )
snake_case_ : Optional[int] = torch.randn(2 , 3 )
snake_case_ : Dict = model(_lowercase )
self.assertEqual(output.device , _lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
snake_case_ : List[str] = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowercase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowercase ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
snake_case_ : Dict = torch.randn(2 , 3 )
snake_case_ : Dict = model(_lowercase )
self.assertEqual(output.device , _lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will offload the weights of each submodule
snake_case_ : Tuple = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(_lowercase , execution_device=_lowercase , offload=_lowercase )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
snake_case_ : List[Any] = torch.device(_lowercase )
self.assertEqual(model.batchnorm.running_mean.device , _lowercase )
snake_case_ : int = torch.randn(2 , 3 )
snake_case_ : Optional[Any] = model(_lowercase )
self.assertEqual(output.device , _lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(_lowercase , execution_device=_lowercase , offload=_lowercase , offload_buffers=_lowercase )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
snake_case_ : List[Any] = torch.randn(2 , 3 )
snake_case_ : int = model(_lowercase )
self.assertEqual(output.device , _lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will offload the weights of each submodule
snake_case_ : Dict = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
_lowercase , execution_device=_lowercase , offload=_lowercase , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
snake_case_ : List[Any] = torch.device(_lowercase )
self.assertEqual(model.batchnorm.running_mean.device , _lowercase )
snake_case_ : Any = torch.randn(2 , 3 )
snake_case_ : int = model(_lowercase )
self.assertEqual(output.device , _lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
_lowercase , execution_device=_lowercase , offload=_lowercase , weights_map=model.state_dict() , offload_buffers=_lowercase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
snake_case_ : Optional[int] = torch.randn(2 , 3 )
snake_case_ : str = model(_lowercase )
self.assertEqual(output.device , _lowercase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
| 715 |
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
def is_in_circle(__UpperCamelCase : float , __UpperCamelCase : float ) -> bool:
snake_case_ : Dict = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
snake_case_ : Tuple = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(__UpperCamelCase ) )
# The ratio of the circle's area to the square's area is pi/4.
snake_case_ : Union[str, Any] = proportion * 4
print(F'The estimated value of pi is {pi_estimate}' )
print(F'The math module value of pi is {pi}' )
print(F'The total error is {abs(pi - pi_estimate )}' )
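# Editorial note: the standard error of this estimator shrinks as O(1 / sqrt(n)), so
# roughly 100x more samples are needed per extra digit of accuracy; with
# n = 1_000_000 the estimate typically lands within a few 1e-3 of pi.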
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Callable[[float], float] , __UpperCamelCase : float = 0.0 , __UpperCamelCase : float = 1.0 , ):
'''simple docstring'''
return mean(
function_to_integrate(uniform(__UpperCamelCase , __UpperCamelCase ) ) for _ in range(__UpperCamelCase ) ) * (max_value - min_value)
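# Editorial sketch: estimating the integral of x**2 over [0, 1] (exact value 1/3) with
# the estimator above; the wrapper name below is hypothetical.
def _demo_integral_of_x_squared(iterations: int = 100_000) -> float:
    def square(x: float) -> float:
        return x * x

    return area_under_curve_estimator(iterations, square, 0.0, 1.0)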
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : float = 0.0 , __UpperCamelCase : float = 1.0 ):
'''simple docstring'''
def identity_function(__UpperCamelCase : float ) -> float:
return x
snake_case_ : int = area_under_curve_estimator(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ : str = (max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {expected_value}' )
print(F'Total error is {abs(estimated_value - expected_value )}' )
print("""******************""" )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
def function_to_integrate(__UpperCamelCase : float ) -> float:
return sqrt(4.0 - x * x )
snake_case_ : List[Any] = area_under_curve_estimator(
__UpperCamelCase , __UpperCamelCase , 0.0 , 2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {pi}' )
print(F'Total error is {abs(estimated_value - pi )}' )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 0 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
def __lowerCAmelCase ( __UpperCamelCase : int=None , __UpperCamelCase : Dict=None ):
'''simple docstring'''
return field(default_factory=lambda: default , metadata=__UpperCamelCase )
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = list_field(
default=[] , metadata={
'''help''': (
'''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'''
''' of all available models'''
)
} , )
_lowerCamelCase = list_field(
default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} )
_lowerCamelCase = list_field(
default=[8, 32, 128, 512] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} )
_lowerCamelCase = field(default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Use FP16 to accelerate inference.'''} )
_lowerCamelCase = field(default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Benchmark training of model'''} )
_lowerCamelCase = field(default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Verbose memory tracing'''} )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'''
} , )
_lowerCamelCase = field(default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Trace memory line by line'''} )
_lowerCamelCase = field(default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Save result to a CSV file'''} )
_lowerCamelCase = field(default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Save all print statements in a log file'''} )
_lowerCamelCase = field(default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Whether to print environment information'''} )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'''help''': (
'''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'''
''' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'''
''' for debugging / testing and on TPU.'''
)
} , )
_lowerCamelCase = field(
default=f'inference_time_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , )
_lowerCamelCase = field(
default=f'inference_memory_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , )
_lowerCamelCase = field(
default=f'train_time_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , )
_lowerCamelCase = field(
default=f'train_memory_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , )
_lowerCamelCase = field(
default=f'env_info_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving environment information.'''} , )
_lowerCamelCase = field(
default=f'log_{round(time() )}.csv' , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , )
_lowerCamelCase = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'''help''': (
'''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'''
''' model weights.'''
)
} , )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
warnings.warn(
f'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'
""" are deprecated in general and it is advised to use external Benchmarking libraries """
""" to benchmark Transformer models.""" , _lowercase , )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
if len(self.models ) <= 0:
raise ValueError(
"""Please make sure you provide at least one model name / model identifier, *e.g.* `--models"""
""" bert-base-cased` or `args.models = ['bert-base-cased'].""" )
return self.models
@property
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("""Multiprocessing is currently not possible on TPU.""" )
return False
else:
return True
| 716 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = nn.functional.normalize(__UpperCamelCase )
snake_case_ : Tuple = nn.functional.normalize(__UpperCamelCase )
return torch.mm(__UpperCamelCase , normalized_text_embeds.t() )
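# Editorial note: despite the name, this returns a matrix of cosine *similarities* --
# both inputs are L2-normalised, so entry (i, j) of the matmul is cos(theta) between
# image embedding i and concept embedding j, e.g. 1.0 for identical directions and
# 0.0 for orthogonal ones.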
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = CLIPConfig
_lowerCamelCase = ['''CLIPEncoderLayer''']
def __init__( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Tuple = CLIPVisionModel(config.vision_config )
snake_case_ : int = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_lowercase )
snake_case_ : Optional[Any] = nn.Parameter(torch.ones(1_7 , config.projection_dim ) , requires_grad=_lowercase )
snake_case_ : Dict = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=_lowercase )
snake_case_ : Any = nn.Parameter(torch.ones(1_7 ) , requires_grad=_lowercase )
snake_case_ : List[str] = nn.Parameter(torch.ones(3 ) , requires_grad=_lowercase )
@torch.no_grad()
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
snake_case_ : int = self.vision_model(_lowercase )[1] # pooled_output
snake_case_ : str = self.visual_projection(_lowercase )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
snake_case_ : Dict = cosine_distance(_lowercase , self.special_care_embeds ).cpu().float().numpy()
snake_case_ : List[str] = cosine_distance(_lowercase , self.concept_embeds ).cpu().float().numpy()
snake_case_ : Any = []
snake_case_ : Any = image_embeds.shape[0]
for i in range(_lowercase ):
snake_case_ : List[Any] = {"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []}
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
snake_case_ : int = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
snake_case_ : List[str] = special_cos_dist[i][concept_idx]
snake_case_ : Union[str, Any] = self.special_care_embeds_weights[concept_idx].item()
snake_case_ : Tuple = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} )
snake_case_ : Dict = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
snake_case_ : int = cos_dist[i][concept_idx]
snake_case_ : List[Any] = self.concept_embeds_weights[concept_idx].item()
snake_case_ : List[str] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(_lowercase )
result.append(_lowercase )
snake_case_ : Union[str, Any] = [len(res["""bad_concepts"""] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = self.vision_model(_lowercase )[1] # pooled_output
snake_case_ : List[str] = self.visual_projection(_lowercase )
snake_case_ : str = cosine_distance(_lowercase , self.special_care_embeds )
snake_case_ : Optional[int] = cosine_distance(_lowercase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
snake_case_ : Tuple = 0.0
snake_case_ : List[Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
snake_case_ : str = torch.any(special_scores > 0 , dim=1 )
snake_case_ : List[str] = special_care * 0.01
snake_case_ : Optional[int] = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
snake_case_ : Optional[Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
snake_case_ : str = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
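# Editorial note: this ONNX-friendly variant replaces the per-image Python loop of the
# method above with batched tensor ops (broadcast thresholds plus torch.any over dim=1),
# trading the detailed per-concept score dictionaries for a traceable graph.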
| 21 | 0 |
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = (KDPMaDiscreteScheduler,)
_lowerCamelCase = 10
def UpperCAmelCase__ ( self , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = {
"""num_train_timesteps""": 1_1_0_0,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**_lowercase )
return config
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=_lowercase )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_lowercase )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = self.scheduler_classes[0]
snake_case_ : Union[str, Any] = self.get_scheduler_config(prediction_type="""v_prediction""" )
snake_case_ : Union[str, Any] = scheduler_class(**_lowercase )
scheduler.set_timesteps(self.num_inference_steps )
snake_case_ : Any = self.dummy_model()
snake_case_ : int = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case_ : Optional[int] = sample.to(_lowercase )
for i, t in enumerate(scheduler.timesteps ):
snake_case_ : Tuple = scheduler.scale_model_input(_lowercase , _lowercase )
snake_case_ : List[str] = model(_lowercase , _lowercase )
snake_case_ : Optional[Any] = scheduler.step(_lowercase , _lowercase , _lowercase )
snake_case_ : int = output.prev_sample
snake_case_ : List[str] = torch.sum(torch.abs(_lowercase ) )
snake_case_ : Tuple = torch.mean(torch.abs(_lowercase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934E-07 ) < 1E-2
assert abs(result_mean.item() - 6.1112E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.693428650170972E-07 ) < 1E-2
assert abs(result_mean.item() - 0.0002 ) < 1E-3
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
if torch_device == "mps":
return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
if torch_device == "mps":
return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if str(torch_device ).startswith("""cpu""" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
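

# --- Editor's note: hedged sketch of the sampling loop the tests above
# exercise, with random noise standing in for a real denoising model's output.
import torch
from diffusers import KDPM2DiscreteScheduler

_scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1_0_0_0 )
_scheduler.set_timesteps(1_0 )
_sample = torch.randn(1 , 3 , 8 , 8 ) * _scheduler.init_noise_sigma
for _t in _scheduler.timesteps:
    _model_input = _scheduler.scale_model_input(_sample , _t )
    _noise_pred = torch.randn_like(_model_input )  # stand-in for model(_model_input, _t)
    _sample = _scheduler.step(_noise_pred , _t , _sample ).prev_sample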
| 717 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings ( idx: int ):
    '''simple docstring'''
    embed = []
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
F'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
F'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
F'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
F'stage{idx}.patch_embed.norm.bias',
) )
return embed
def attention ( idx: int , cnt: int ):
    '''simple docstring'''
    attention_weights = []
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
F'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
F'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', F'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', F'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', F'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', F'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def cls_token ( idx: int ):
    '''simple docstring'''
    token = []
token.append((F'cvt.encoder.stages.{idx}.cls_token', """stage2.cls_token""") )
return token
def final ():
    '''simple docstring'''
    head = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def convert_cvt_checkpoint ( cvt_model , image_size , cvt_file_name , pytorch_dump_folder_path ):
    '''simple docstring'''
    img_labels_file = """imagenet-1k-id2label.json"""
    num_labels = 1_0_0_0
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type="""dataset""" ) ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels , id2label=id2label , label2id=label2id )
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "13":
        config.depth = [1, 2, 1_0]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "21":
        config.depth = [1, 4, 1_6]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 2_0]
        config.num_heads = [3, 1_2, 1_6]
        config.embed_dim = [1_9_2, 7_6_8, 1_0_2_4]
    model = CvtForImageClassification(config )
    image_processor = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
    image_processor.size["""shortest_edge"""] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device("""cpu""" ) )
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt )
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    for i in range(len(list_of_state_dict ) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Input Image Size''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
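

# --- Editor's note: hedged toy illustration of the renaming scheme above: each
# helper yields (new_key, old_key) pairs, and the converted state dict is
# filled by reading the original weights under the old keys. Names are made up.
import torch
from collections import OrderedDict

_pairs = [("""linear.weight""", """fc.weight"""), ("""linear.bias""", """fc.bias""")]
_original = {"""fc.weight""": torch.ones(2 , 2 ), """fc.bias""": torch.zeros(2 )}
_converted = OrderedDict()
for _new_key, _old_key in _pairs:
    _converted[_new_key] = _original[_old_key]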
| 21 | 0 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """simple docstring"""

    model_name_or_path: Optional[str] = field(
        default=None , metadata={
            '''help''': (
                '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
            )
        } , )
    model_type: Optional[str] = field(
        default=None , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(MODEL_TYPES )} , )
    config_overrides: Optional[str] = field(
        default=None , metadata={
            '''help''': (
                '''Override some existing default config settings when a model is trained from scratch. Example: '''
                '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
            )
        } , )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
    model_revision: str = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    use_auth_token: bool = field(
        default=False , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )

    def __post_init__( self ):
        '''simple docstring'''
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                """--config_overrides can't be used in combination with --config_name or --model_name_or_path""" )
@dataclass
class DataTrainingArguments:
    """simple docstring"""

    dataset_name: Optional[str] = field(
        default=None , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    train_file: Optional[str] = field(default=None , metadata={'''help''': '''The input training data file (a text file).'''} )
    validation_file: Optional[str] = field(
        default=None , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
    train_ref_file: Optional[str] = field(
        default=None , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , )
    validation_ref_file: Optional[str] = field(
        default=None , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
    validation_split_percentage: Optional[int] = field(
        default=5 , metadata={
            '''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
        } , )
    max_seq_length: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated. Default to the max input length of the model.'''
            )
        } , )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
    mlm_probability: float = field(
        default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
    pad_to_max_length: bool = field(
        default=False , metadata={
            '''help''': (
                '''Whether to pad all samples to `max_seq_length`. '''
                '''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
            )
        } , )

    def __post_init__( self ):
        '''simple docstring'''
        if self.train_file is not None:
            extension = self.train_file.split(""".""" )[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(""".""" )[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references ( dataset , ref_file ):
    '''simple docstring'''
    with open(ref_file , """r""" , encoding="""utf-8""" ) as f:
        refs = [json.loads(line ) for line in f.read().splitlines() if (len(line ) > 0 and not line.isspace())]
    assert len(dataset ) == len(refs )
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["""chinese_ref"""] = refs
    return Dataset.from_dict(dataset_dict )
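

# --- Editor's note: hedged sketch of the per-line ref-file format the helper
# above parses: one JSON list per dataset row, marking the sub-token positions
# that continue a whole word. Values are made up.
_example_lines = ["""[2, 3]""", """[1]"""]
_example_refs = [json.loads(line ) for line in _example_lines]
assert len(_example_refs ) == 2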
def main ():
'''simple docstring'''
snake_case_ : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case_ : List[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case_ : Any = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
snake_case_ : Union[str, Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case_ : Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , __UpperCamelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
snake_case_ : Union[str, Any] = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
snake_case_ : Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'train[:{data_args.validation_split_percentage}%]' , )
snake_case_ : int = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'train[{data_args.validation_split_percentage}%:]' , )
else:
snake_case_ : List[str] = {}
if data_args.train_file is not None:
snake_case_ : Dict = data_args.train_file
if data_args.validation_file is not None:
snake_case_ : Any = data_args.validation_file
snake_case_ : Tuple = data_args.train_file.split(""".""" )[-1]
if extension == "txt":
snake_case_ : Optional[int] = """text"""
snake_case_ : List[str] = load_dataset(__UpperCamelCase , data_files=__UpperCamelCase )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case_ : Union[str, Any] = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
snake_case_ : Union[str, Any] = AutoConfig.from_pretrained(model_args.config_name , **__UpperCamelCase )
elif model_args.model_name_or_path:
snake_case_ : List[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , **__UpperCamelCase )
else:
snake_case_ : Optional[Any] = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(F'New config: {config}' )
snake_case_ : Any = {
"""cache_dir""": model_args.cache_dir,
"""use_fast""": model_args.use_fast_tokenizer,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
snake_case_ : Union[str, Any] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **__UpperCamelCase )
elif model_args.model_name_or_path:
snake_case_ : Optional[Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **__UpperCamelCase )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name.""" )
if model_args.model_name_or_path:
snake_case_ : Optional[int] = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
snake_case_ : Dict = AutoModelForMaskedLM.from_config(__UpperCamelCase )
model.resize_token_embeddings(len(__UpperCamelCase ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
        column_names = datasets["""train"""].column_names
    else:
        column_names = datasets["""validation"""].column_names
    text_column_name = """text""" if """text""" in column_names else column_names[0]
    padding = """max_length""" if data_args.pad_to_max_length else False

    def tokenize_function(examples ):
        # Remove empty lines
        examples["""text"""] = [line for line in examples["""text"""] if len(line ) > 0 and not line.isspace()]
        return tokenizer(examples["""text"""] , padding=padding , truncation=True , max_length=data_args.max_seq_length )

    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["""train"""] = add_chinese_references(tokenized_datasets["""train"""] , data_args.train_ref_file )
    if data_args.validation_ref_file is not None:
        tokenized_datasets["""validation"""] = add_chinese_references(
            tokenized_datasets["""validation"""] , data_args.validation_ref_file )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False
# Data collator
# This one will take care of randomly masking the tokens.
snake_case_ : Union[str, Any] = DataCollatorForWholeWordMask(tokenizer=__UpperCamelCase , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
snake_case_ : Optional[int] = Trainer(
model=__UpperCamelCase , args=__UpperCamelCase , train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None , eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None , tokenizer=__UpperCamelCase , data_collator=__UpperCamelCase , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
snake_case_ : Any = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
snake_case_ : List[str] = model_args.model_name_or_path
else:
snake_case_ : Optional[int] = None
snake_case_ : Any = trainer.train(resume_from_checkpoint=__UpperCamelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
snake_case_ : List[str] = os.path.join(training_args.output_dir , """train_results.txt""" )
if trainer.is_world_process_zero():
with open(__UpperCamelCase , """w""" ) as writer:
logger.info("""***** Train results *****""" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# Evaluation
snake_case_ : Any = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
snake_case_ : str = trainer.evaluate()
snake_case_ : Tuple = math.exp(eval_output["""eval_loss"""] )
snake_case_ : int = perplexity
snake_case_ : str = os.path.join(training_args.output_dir , """eval_results_mlm_wwm.txt""" )
if trainer.is_world_process_zero():
with open(__UpperCamelCase , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in sorted(results.items() ):
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
return results
def _mp_fn ( index ):
    '''simple docstring'''
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
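
# --- Editor's note: hedged usage sketch (file names are hypothetical). The
# script is driven by CLI flags, or by a single JSON config passed as argv[1]:
#
#   python run_mlm_wwm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_file train.txt --train_ref_file train_ref.txt \
#       --do_train --output_dir ./out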
| 718 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""

    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp( self ):
'''simple docstring'''
super().setUp()
# fmt: off
snake_case_ : Optional[Any] = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
snake_case_ : str = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowercase ) + """\n""" )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        input_text = """tester"""
        output_text = """tester"""
        return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}' ):
                special_token = """[SPECIAL_TOKEN]"""
                tokenizer.add_special_tokens({"""cls_token""": special_token} )
                encoded_special_token = tokenizer.encode([special_token] , add_special_tokens=False )
                self.assertEqual(len(encoded_special_token ) , 1 )
                decoded = tokenizer.decode(encoded_special_token , skip_special_tokens=True )
                self.assertTrue(special_token not in decoded )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}' ):
                input_text , output_text = self.get_input_output_texts(tokenizer )
                tokens = tokenizer.tokenize(input_text )
                ids = tokenizer.convert_tokens_to_ids(tokens )
                ids_a = tokenizer.encode(input_text , add_special_tokens=False )
                self.assertListEqual(ids , ids_a )
                tokens_a = tokenizer.convert_ids_to_tokens(ids )
                self.assertNotEqual(len(tokens_a ) , 0 )
                text_a = tokenizer.decode(ids )
                self.assertIsInstance(text_a , str )
                self.assertEqual(text_a.replace(""" """ , """""" ) , output_text )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
pass
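

# --- Editor's note: hedged sketch of the character-level round trip the tests
# above cover, using plain dicts instead of the tokenizer class.
_vocab = {ch: i for i, ch in enumerate("""abcdefghijklmnopqrstuvwxyz""" )}
_ids = [_vocab[c] for c in """tester"""]
_inv = {i: ch for ch, i in _vocab.items()}
assert """""".join(_inv[i] for i in _ids ) == """tester"""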
| 21 | 0 |
"""simple docstring"""
from __future__ import annotations
def print_distance ( distance: list[float] , src: int ):
    '''simple docstring'''
    print(F'Vertex\tShortest Distance from vertex {src}' )
    for i, d in enumerate(distance ):
        print(F'{i}\t\t{d}' )
def check_negative_cycle ( graph: list[dict[str, int]] , distance: list[float] , edge_count: int ):
    '''simple docstring'''
    for j in range(edge_count ):
        u , v , w = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
        if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford ( graph: list[dict[str, int]] , vertex_count: int , edge_count: int , src: int ) -> list[float]:
    '''simple docstring'''
    distance = [float("""inf""" )] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1 ):
        for j in range(edge_count ):
            u , v , w = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
            if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph , distance , edge_count )
    if negative_cycle_exists:
        raise Exception("""Negative cycle found""" )
    return distance
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    V = int(input('''Enter number of vertices: ''').strip())
    E = int(input('''Enter number of edges: ''').strip())
    graph: list[dict[str, int]] = [{} for _ in range(E)]
    for i in range(E):
        print('''Edge ''', i + 1)
        src , dest , weight = (
            int(x)
            for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
        )
        graph[i] = {'''src''': src, '''dst''': dest, '''weight''': weight}
    source = int(input('''\nEnter shortest path source:''').strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, source)
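
# --- Editor's note: hedged non-interactive usage sketch of the functions above:
#
#   graph = [{"src": 0, "dst": 1, "weight": 2}, {"src": 1, "dst": 2, "weight": 3}]
#   bellman_ford(graph, vertex_count=3, edge_count=2, src=0)  # -> [0.0, 2.0, 5.0]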
| 719 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
@require_flax
class _lowerCAmelCase ( FlaxModelTesterMixin , unittest.TestCase ):
    """simple docstring"""

    test_head_masking = True
    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = FlaxRoFormerModelTester(self )
@slow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
        model = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        vocab_size = 5_0_0_0_0
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
        self.assertTrue(jnp.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
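

# --- Editor's note: hedged sketch of the integration-test pattern above; needs
# network access to download the checkpoint named in this file.
#
#   import jax.numpy as jnp
#   from transformers import FlaxRoFormerForMaskedLM
#
#   model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
#   logits = model(jnp.array([[0, 1, 2, 3, 4, 5]]))[0]
#   print(logits.shape)  # (1, 6, 50000)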
| 21 | 0 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list ( shape , scale=1.0 , rng=None , name=None ):
    '''simple docstring'''
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , min_seq_length=4_0_0 , max_seq_length=2_0_0_0 , feature_size=1_0 , hop_length=1_6_0 , chunk_length=8 , padding_value=0.0 , sampling_rate=4_0_0_0 , return_attention_mask=False , do_normalize=True , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict( self ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def UpperCAmelCase__ ( self , equal_length=False , numpify=False ):
        '''simple docstring'''
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class _lowerCAmelCase ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    """simple docstring"""

    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.feat_extract_tester = WhisperFeatureExtractionTester(self )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , """feat_extract.json""" )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs , padding="""max_length""" , return_tensors="""np""" ).input_features
        self.assertTrue(input_features.ndim == 3 )
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1E-3 ) )
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors="""np""" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors="""np""" ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors="""np""" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors="""np""" ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
        # Test truncation required
        speech_inputs = [floats_list((1, x) )[0] for x in range(2_0_0 , (feature_extractor.n_samples + 5_0_0) , 2_0_0 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input ) for speech_input in speech_inputs_truncated]
        encoded_sequences_1 = feature_extractor(np_speech_inputs , return_tensors="""np""" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated , return_tensors="""np""" ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(1_0_0 , 3_2 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
    def _load_datasamples( self , num_samples ):
        '''simple docstring'''
        ds = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("""id""" ).select(range(num_samples ) )[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ] )
        # fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech , return_tensors="""pt""" ).input_features
        self.assertEqual(input_features.shape , (1, 8_0, 3_0_0_0) )
        self.assertTrue(torch.allclose(input_features[0, 0, :3_0] , EXPECTED_INPUT_FEATURES , atol=1E-4 ) )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        audio = self._load_datasamples(1 )[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5_5_3_5  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=None )[0]
        self.assertTrue(np.all(np.mean(audio ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(audio ) - 1 ) < 1E-3 ) )
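

# --- Editor's note: hedged numpy sketch of the zero-mean/unit-variance check
# in the last test above, on made-up audio data.
_audio = np.random.rand(4_0_0 ).astype(np.float32 )
_normed = (_audio - _audio.mean()) / np.sqrt(_audio.var() + 1E-7 )
assert abs(float(_normed.mean() ) ) < 1E-3 and abs(float(_normed.var() ) - 1 ) < 1E-3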
| 720 |
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def dataset ():
    '''simple docstring'''
    n = 1_0
    features = datasets.Features(
        {
            """tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
            """labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
            """answers""": datasets.Sequence(
                {
                    """text""": datasets.Value("""string""" ),
                    """answer_start""": datasets.Value("""int32""" ),
                } ),
            """id""": datasets.Value("""int64""" ),
        } )
    dataset = datasets.Dataset.from_dict(
        {
            """tokens""": [["""foo"""] * 5] * n,
            """labels""": [[1] * 5] * n,
            """answers""": [{"""answer_start""": [9_7], """text""": ["""1976"""]}] * 1_0,
            """id""": list(range(n ) ),
        } , features=features , )
    return dataset
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( dataset , tmp_path_factory ):
    '''simple docstring'''
    filename = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
    dataset.map(cache_file_name=filename )
    return filename
# FILE_CONTENT + files
FILE_CONTENT = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope="""session""" )
def text_file ( tmp_path_factory ):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
    data = FILE_CONTENT
    with open(filename , """w""" ) as f:
        f.write(data )
    return filename
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( tmp_path_factory ):
    '''simple docstring'''
    import bz2
    path = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
    data = bytes(FILE_CONTENT , """utf-8""" )
    with bz2.open(path , """wb""" ) as f:
        f.write(data )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( tmp_path_factory ):
    '''simple docstring'''
    import gzip
    path = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
    data = bytes(FILE_CONTENT , """utf-8""" )
    with gzip.open(path , """wb""" ) as f:
        f.write(data )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( tmp_path_factory ):
    '''simple docstring'''
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
    path = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
    data = bytes(FILE_CONTENT , """utf-8""" )
    with lz4.frame.open(path , """wb""" ) as f:
        f.write(data )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( tmp_path_factory , text_file ):
    '''simple docstring'''
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
    path = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
    with py7zr.SevenZipFile(path , """w""" ) as archive:
        archive.write(text_file , arcname=os.path.basename(text_file ) )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( tmp_path_factory , text_file ):
    '''simple docstring'''
    import tarfile
    path = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
    with tarfile.TarFile(path , """w""" ) as f:
        f.add(text_file , arcname=os.path.basename(text_file ) )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( tmp_path_factory ):
    '''simple docstring'''
    import lzma
    path = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
    data = bytes(FILE_CONTENT , """utf-8""" )
    with lzma.open(path , """wb""" ) as f:
        f.write(data )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( tmp_path_factory , text_file ):
    '''simple docstring'''
    import zipfile
    path = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
    with zipfile.ZipFile(path , """w""" ) as f:
        f.write(text_file , arcname=os.path.basename(text_file ) )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( tmp_path_factory ):
    '''simple docstring'''
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd
    path = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
    data = bytes(FILE_CONTENT , """utf-8""" )
    with zstd.open(path , """wb""" ) as f:
        f.write(data )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( tmp_path_factory ):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
    data = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase )
return filename
DATA = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
DATA2 = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
DATA_DICT_OF_LISTS = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
DATA_STR = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( tmp_path_factory ):
    '''simple docstring'''
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS )
    path = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
    dataset.map(cache_file_name=path )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( tmp_path_factory ):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
    with contextlib.closing(sqlite3.connect(path ) ) as con:
        cur = con.cursor()
        cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
        for item in DATA:
            cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
        con.commit()
    return path
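

# --- Editor's note: hedged in-memory sketch mirroring the sqlite fixture
# above, using the same table layout with made-up values.
_con = sqlite3.connect(""":memory:""" )
_con.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
_con.execute("""INSERT INTO dataset VALUES (?, ?, ?)""" , ("""0""", 0, 0.0) )
_rows = _con.execute("""SELECT * FROM dataset""" ).fetchall()
_con.close()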
@pytest.fixture(scope="""session""" )
def csv_path ( tmp_path_factory ):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
    with open(path , """w""" , newline="""""" ) as f:
        writer = csv.DictWriter(f , fieldnames=["""col_1""", """col_2""", """col_3"""] )
        writer.writeheader()
        for item in DATA:
            writer.writerow(item )
    return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(__UpperCamelCase , """w""" , newline="""""" ) as f:
snake_case_ : str = csv.DictWriter(__UpperCamelCase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : int ):
'''simple docstring'''
import bza
snake_case_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(__UpperCamelCase , """rb""" ) as f:
snake_case_ : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(__UpperCamelCase , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Any = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
snake_case_ : Any = pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(__UpperCamelCase , """wb""" ) as f:
snake_case_ : Optional[int] = pq.ParquetWriter(__UpperCamelCase , schema=__UpperCamelCase )
snake_case_ : Optional[int] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__UpperCamelCase ) )] for k in DATA[0]} , schema=__UpperCamelCase )
writer.write_table(__UpperCamelCase )
writer.close()
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
snake_case_ : Any = {"""data""": DATA}
with open(__UpperCamelCase , """w""" ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
snake_case_ : List[Any] = {"""data""": DATA_DICT_OF_LISTS}
with open(__UpperCamelCase , """w""" ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
import gzip
snake_case_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(__UpperCamelCase , """rb""" ) as orig_file:
with gzip.open(__UpperCamelCase , """wb""" ) as zipped_file:
zipped_file.writelines(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[str] ):
'''simple docstring'''
import gzip
snake_case_ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(__UpperCamelCase , """rb""" ) as orig_file:
with gzip.open(__UpperCamelCase , """wb""" ) as zipped_file:
zipped_file.writelines(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : List[str] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""nested""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(__UpperCamelCase , arcname=os.path.join("""nested""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : str = ["""0""", """1""", """2""", """3"""]
snake_case_ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : int = ["""0""", """1""", """2""", """3"""]
snake_case_ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : List[Any] = ["""0""", """1""", """2""", """3"""]
snake_case_ : str = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(__UpperCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(__UpperCamelCase , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : List[Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
snake_case_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
return data_dir
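# The dot-prefixed file and directory above give tests a way to check that
# data discovery skips hidden entries.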
| 21 | 0 |
"""simple docstring"""
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''philschmid/bart-large-cnn-samsum'''
_lowerCamelCase = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
_lowerCamelCase = '''summarizer'''
_lowerCamelCase = AutoTokenizer
_lowerCamelCase = AutoModelForSeqaSeqLM
_lowerCamelCase = ['''text''']
_lowerCamelCase = ['''text''']
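    # The three methods below implement the standard PipelineTool stages:
    # encode (tokenize the input text), forward (generate the summary token
    # ids), and decode (turn the generated ids back into text).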
def UpperCAmelCase__ ( self , _lowercase ) -> Tuple:
'''simple docstring'''
return self.pre_processor(_lowercase , return_tensors="""pt""" , truncation=_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> str:
'''simple docstring'''
return self.model.generate(**_lowercase )[0]
def UpperCAmelCase__ ( self , _lowercase ) -> Tuple:
'''simple docstring'''
return self.pre_processor.decode(_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase )
| 721 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError("""the value of both inputs must be positive""" )
snake_case_ : List[str] = str(bin(__UpperCamelCase ) )[2:] # remove the leading "0b"
snake_case_ : Union[str, Any] = str(bin(__UpperCamelCase ) )[2:] # remove the leading "0b"
snake_case_ : int = max(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(__UpperCamelCase ) , b_binary.zfill(__UpperCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
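# Illustrative call (values chosen here, not part of the original module):
# the function above maps (25, 32) to "0b111001", since 011001 ^ 100000 == 111001.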
| 21 | 0 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int = 1_0_0_0 ):
'''simple docstring'''
return sum(e for e in range(3 , __UpperCamelCase ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F'''{solution() = }''')
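# Smaller worked case: with an upper bound of 10 the function sums 3, 5, 6 and 9,
# returning 23.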
| 700 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
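# Example: the words are reversed, not the characters, so "I love Python"
# becomes "Python love I".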
| 21 | 0 |
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , _lowercase="</s>" , _lowercase="<unk>" , _lowercase="<pad>" , _lowercase=1_2_5 , _lowercase=None , **_lowercase , ) -> None:
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
snake_case_ : Dict = [f'<extra_id_{i}>' for i in range(_lowercase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
snake_case_ : Optional[Any] = len(set(filter(lambda _lowercase : bool("""extra_id""" in str(_lowercase ) ) , _lowercase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
""" provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"""
""" extra_ids tokens""" )
snake_case_ : List[Any] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else pad_token
snake_case_ : Dict = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else eos_token
snake_case_ : str = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else unk_token
super().__init__(
eos_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , extra_ids=_lowercase , additional_special_tokens=_lowercase , **_lowercase , )
snake_case_ : List[str] = extra_ids
        snake_case_ : Tuple = 2**8 # a UTF-8 code unit is 8 bits, so there are 256 possible byte values
# define special tokens dict
snake_case_ : Dict[int, str] = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
snake_case_ : int = len(self.special_tokens_encoder )
snake_case_ : Tuple = len(_lowercase )
for i, token in enumerate(_lowercase ):
snake_case_ : List[str] = self.vocab_size + i - n
snake_case_ : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()}
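        # Resulting vocabulary layout: ids 0-2 hold pad/eos/unk, ids 3-258 cover
        # all 256 possible byte values, and the trailing `extra_ids` positions
        # hold the <extra_id_*> sentinel tokens.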
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None , _lowercase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(_lowercase )) + [1]
return ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1]
def UpperCAmelCase__ ( self , _lowercase ) -> List[int]:
'''simple docstring'''
if len(_lowercase ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
""" eos tokens being added.""" )
return token_ids
else:
return token_ids + [self.eos_token_id]
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]:
'''simple docstring'''
snake_case_ : Optional[int] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]:
'''simple docstring'''
snake_case_ : List[str] = self._add_eos_if_not_present(_lowercase )
if token_ids_a is None:
return token_ids_a
else:
snake_case_ : List[Any] = self._add_eos_if_not_present(_lowercase )
return token_ids_a + token_ids_a
def UpperCAmelCase__ ( self , _lowercase ) -> List[str]:
'''simple docstring'''
snake_case_ : int = [chr(_lowercase ) for i in text.encode("""utf-8""" )]
return tokens
def UpperCAmelCase__ ( self , _lowercase ) -> int:
'''simple docstring'''
if token in self.special_tokens_encoder:
snake_case_ : Tuple = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
snake_case_ : List[Any] = self.added_tokens_encoder[token]
elif len(_lowercase ) != 1:
snake_case_ : List[Any] = self.unk_token_id
else:
snake_case_ : List[Any] = ord(_lowercase ) + self._num_special_tokens
return token_id
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
if index in self.special_tokens_decoder:
snake_case_ : Dict = self.special_tokens_decoder[index]
else:
snake_case_ : Any = chr(index - self._num_special_tokens )
return token
def UpperCAmelCase__ ( self , _lowercase ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = b""""""
for token in tokens:
if token in self.special_tokens_decoder:
snake_case_ : List[Any] = self.special_tokens_decoder[token].encode("""utf-8""" )
elif token in self.added_tokens_decoder:
snake_case_ : Optional[Any] = self.special_tokens_decoder[token].encode("""utf-8""" )
elif token in self.special_tokens_encoder:
snake_case_ : Optional[Any] = token.encode("""utf-8""" )
elif token in self.added_tokens_encoder:
snake_case_ : Optional[int] = token.encode("""utf-8""" )
else:
snake_case_ : Optional[Any] = bytes([ord(_lowercase )] )
bstring += tok_string
snake_case_ : List[Any] = bstring.decode("""utf-8""" , errors="""ignore""" )
return string
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> Tuple[str]:
'''simple docstring'''
return ()
| 701 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''nat'''
_lowerCamelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _lowercase=4 , _lowercase=3 , _lowercase=6_4 , _lowercase=[3, 4, 6, 5] , _lowercase=[2, 4, 8, 1_6] , _lowercase=7 , _lowercase=3.0 , _lowercase=True , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.1 , _lowercase="gelu" , _lowercase=0.02 , _lowercase=1E-5 , _lowercase=0.0 , _lowercase=None , _lowercase=None , **_lowercase , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : Any = patch_size
snake_case_ : Optional[Any] = num_channels
snake_case_ : Optional[Any] = embed_dim
snake_case_ : Tuple = depths
snake_case_ : int = len(_lowercase )
snake_case_ : Optional[int] = num_heads
snake_case_ : List[str] = kernel_size
snake_case_ : str = mlp_ratio
snake_case_ : str = qkv_bias
snake_case_ : str = hidden_dropout_prob
snake_case_ : Tuple = attention_probs_dropout_prob
snake_case_ : Tuple = drop_path_rate
snake_case_ : Dict = hidden_act
snake_case_ : Union[str, Any] = layer_norm_eps
snake_case_ : Tuple = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case_ : Union[str, Any] = int(embed_dim * 2 ** (len(_lowercase ) - 1) )
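        # e.g. the default embed_dim=64 with four stages gives 64 * 2**3 = 512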
snake_case_ : Union[str, Any] = layer_scale_init_value
snake_case_ : Optional[Any] = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(_lowercase ) + 1 )]
snake_case_ , snake_case_ : Any = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names )
| 21 | 0 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
debug_launcher(test_script.main )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
debug_launcher(test_ops.main )
| 702 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
__lowerCAmelCase : Optional[Any] = False
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
return 1_2
@property
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return 1_2
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return 3_2
@property
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : List[Any] = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(_lowercase )
@property
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = 1_2
snake_case_ : Tuple = 1_2
snake_case_ : Tuple = {
"""attention_bias""": True,
"""cross_attention_dim""": 3_2,
"""attention_head_dim""": height * width,
"""num_attention_heads""": 1,
"""num_vector_embeds""": self.num_embed,
"""num_embeds_ada_norm""": self.num_embeds_ada_norm,
"""norm_num_groups""": 3_2,
"""sample_size""": width,
"""activation_fn""": """geglu-approximate""",
}
snake_case_ : Optional[Any] = TransformeraDModel(**_lowercase )
return model
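    # The dummy components above use deliberately small sizes so the pipeline
    # tests below can run quickly on CPU.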
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = """cpu"""
snake_case_ : List[str] = self.dummy_vqvae
snake_case_ : Any = self.dummy_text_encoder
snake_case_ : Tuple = self.dummy_tokenizer
snake_case_ : int = self.dummy_transformer
snake_case_ : int = VQDiffusionScheduler(self.num_embed )
snake_case_ : Dict = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowercase )
snake_case_ : Optional[Any] = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
snake_case_ : int = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : List[Any] = """teddy bear playing in the pool"""
snake_case_ : Dict = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : List[Any] = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="""np""" )
snake_case_ : Optional[int] = output.images
snake_case_ : List[Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Dict = pipe(
[prompt] , generator=_lowercase , output_type="""np""" , return_dict=_lowercase , num_inference_steps=2 )[0]
snake_case_ : List[Any] = image[0, -3:, -3:, -1]
snake_case_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
snake_case_ : Dict = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : int = """cpu"""
snake_case_ : List[Any] = self.dummy_vqvae
snake_case_ : Optional[int] = self.dummy_text_encoder
snake_case_ : List[Any] = self.dummy_tokenizer
snake_case_ : Union[str, Any] = self.dummy_transformer
snake_case_ : str = VQDiffusionScheduler(self.num_embed )
snake_case_ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(
learnable=_lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
snake_case_ : Union[str, Any] = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
snake_case_ : Any = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : Tuple = """teddy bear playing in the pool"""
snake_case_ : str = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Tuple = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="""np""" )
snake_case_ : Dict = output.images
snake_case_ : Union[str, Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Any = pipe(
[prompt] , generator=_lowercase , output_type="""np""" , return_dict=_lowercase , num_inference_steps=2 )[0]
snake_case_ : Optional[Any] = image[0, -3:, -3:, -1]
snake_case_ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
snake_case_ : int = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" )
snake_case_ : str = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" )
snake_case_ : Optional[Any] = pipeline.to(_lowercase )
pipeline.set_progress_bar_config(disable=_lowercase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case_ : Any = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Optional[int] = pipeline(
"""teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=_lowercase , output_type="""np""" , )
snake_case_ : Union[str, Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 21 | 0 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int = 2_0_0 ):
'''simple docstring'''
snake_case_ : Union[str, Any] = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 2_0_0]
snake_case_ : Union[str, Any] = [0] * (pence + 1)
snake_case_ : int = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(__UpperCamelCase , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 7_3682
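# Smaller worked case: 5 pence can be made 4 ways with these coins:
# {5}, {2, 2, 1}, {2, 1, 1, 1} and {1, 1, 1, 1, 1}.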
| 703 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply point PYTHONPATH at that existing clone
#
# If the training was done with a Megatron-LM fork, e.g.
# https://github.com/microsoft/Megatron-DeepSpeed/, then you likely need that fork
# on your path instead, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Any=0 ):
'''simple docstring'''
if name is None:
snake_case_ : Dict = None
else:
snake_case_ : Dict = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(5_0 - spaces ) + """s}"""
snake_case_ : Any = fmt.format(__UpperCamelCase )
# Print and recurse (if needed).
if isinstance(__UpperCamelCase , __UpperCamelCase ):
if msg is not None:
print(__UpperCamelCase )
for k in val.keys():
recursive_print(__UpperCamelCase , val[k] , spaces + 2 )
elif isinstance(__UpperCamelCase , torch.Tensor ):
print(__UpperCamelCase , """:""" , val.size() )
else:
print(__UpperCamelCase , """:""" , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : Any = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
snake_case_ : List[str] = (num_heads, hidden_size, num_splits) + input_shape[1:]
snake_case_ : Tuple = param.view(*__UpperCamelCase )
snake_case_ : Tuple = param.transpose(0 , 2 )
snake_case_ : Any = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
snake_case_ : Optional[Any] = (num_heads, num_splits, hidden_size) + input_shape[1:]
snake_case_ : str = param.view(*__UpperCamelCase )
snake_case_ : Dict = param.transpose(0 , 1 ).contiguous()
snake_case_ : int = param.view(*__UpperCamelCase )
return param
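# Shape walk-through (illustrative numbers): with num_heads=16, hidden_size=64,
# num_splits=3 and a checkpoint_version >= 2.0 tensor of shape [3072, 1024],
# the view/transpose/view sequence goes [16, 3, 64, 1024] -> [3, 16, 64, 1024]
# -> [3072, 1024], leaving the Q, K and V blocks stored contiguously.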
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Dict = {}
# old versions did not store training args
snake_case_ : List[str] = input_state_dict.get("""args""" , __UpperCamelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
snake_case_ : Tuple = ds_args.padded_vocab_size
snake_case_ : Optional[int] = ds_args.max_position_embeddings
snake_case_ : Union[str, Any] = ds_args.hidden_size
snake_case_ : Union[str, Any] = ds_args.num_layers
snake_case_ : str = ds_args.num_attention_heads
snake_case_ : str = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
snake_case_ : Union[str, Any] = config.n_head
# The hidden_size per head.
snake_case_ : Optional[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
snake_case_ : Optional[Any] = input_state_dict["""checkpoint_version"""]
else:
snake_case_ : int = 0.0
# The model.
snake_case_ : List[str] = input_state_dict["""model"""]
# The language model.
snake_case_ : str = model["""language_model"""]
# The embeddings.
snake_case_ : Tuple = lm["""embedding"""]
# The word embeddings.
snake_case_ : List[str] = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
snake_case_ : Optional[int] = word_embeddings[: config.vocab_size, :]
snake_case_ : Optional[int] = word_embeddings
# The position embeddings.
snake_case_ : List[Any] = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
snake_case_ : Tuple = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match' )
# Store the position embeddings.
snake_case_ : Union[str, Any] = pos_embeddings
# The transformer.
snake_case_ : Optional[Any] = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
snake_case_ : Optional[Any] = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
snake_case_ : List[str] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
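    # Keys are Megatron parameter names; values are the matching GPT-2 module paths.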
# Extract the layers.
for key, val in transformer.items():
# Match the name.
snake_case_ : int = layer_re.match(__UpperCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
snake_case_ : Tuple = int(m.group(1 ) )
# The name of the operation.
snake_case_ : Any = m.group(2 )
# Is it a weight or a bias?
snake_case_ : Union[str, Any] = m.group(3 )
# The name of the layer.
snake_case_ : str = F'transformer.h.{layer_idx}'
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
snake_case_ : Dict = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
snake_case_ : Optional[int] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
snake_case_ : Optional[Any] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , __UpperCamelCase , __UpperCamelCase )
snake_case_ : List[Any] = causal_mask
# Insert a "dummy" tensor for masked_bias.
snake_case_ : str = torch.tensor(-1E4 , dtype=torch.floataa )
snake_case_ : List[Any] = masked_bias
snake_case_ : Optional[int] = fix_query_key_value_ordering(__UpperCamelCase , __UpperCamelCase , 3 , __UpperCamelCase , __UpperCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
snake_case_ : str = out_val.transpose(0 , 1 ).contiguous()
# Store.
snake_case_ : Tuple = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
snake_case_ : Optional[Any] = fix_query_key_value_ordering(__UpperCamelCase , __UpperCamelCase , 3 , __UpperCamelCase , __UpperCamelCase )
# Store. No change of shape.
snake_case_ : List[Any] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
snake_case_ : Any = megatron_to_transformers[op_name]
snake_case_ : str = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
snake_case_ : List[str] = megatron_to_transformers[op_name]
snake_case_ : Tuple = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
snake_case_ : Dict = transformer["""final_layernorm.weight"""]
snake_case_ : Dict = transformer["""final_layernorm.bias"""]
    # For the LM head, transformers expects the weight matrix to be tied to the word embeddings.
snake_case_ : Optional[int] = word_embeddings
# It should be done!
return output_state_dict
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[str] = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" , type=__UpperCamelCase , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
parser.add_argument(
"""--config_file""" , default="""""" , type=__UpperCamelCase , help="""An optional config json file describing the pre-trained model.""" , )
snake_case_ : str = parser.parse_args()
# Extract the basename.
snake_case_ : Optional[Any] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F'Extracting PyTorch state dictionary from {args.path_to_checkpoint}' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
snake_case_ : Optional[int] = torch.load(__UpperCamelCase , map_location="""cpu""" )
else:
snake_case_ : List[Any] = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
snake_case_ : Any = input_state_dict.get("""args""" , __UpperCamelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
snake_case_ : Any = """gelu_fast"""
elif ds_args.openai_gelu:
snake_case_ : Tuple = """gelu_new"""
else:
snake_case_ : List[str] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
snake_case_ : Dict = """gelu_new"""
# Spell out all parameters in case the defaults change.
snake_case_ : List[str] = GPTaConfig(
vocab_size=5_0_2_5_7 , n_positions=1_0_2_4 , n_embd=1_0_2_4 , n_layer=2_4 , n_head=1_6 , n_inner=4_0_9_6 , activation_function=__UpperCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=__UpperCamelCase , summary_activation=__UpperCamelCase , summary_proj_to_labels=__UpperCamelCase , summary_first_dropout=0.1 , scale_attn_weights=__UpperCamelCase , use_cache=__UpperCamelCase , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , )
else:
snake_case_ : List[Any] = GPTaConfig.from_json_file(args.config_file )
snake_case_ : int = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
snake_case_ : Tuple = convert_megatron_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(__UpperCamelCase , __UpperCamelCase )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
snake_case_ : str = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
snake_case_ : Optional[Any] = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
snake_case_ : str = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'Unrecognized tokenizer_type {tokenizer_type}' )
else:
snake_case_ : List[str] = """gpt2"""
snake_case_ : List[Any] = AutoTokenizer.from_pretrained(__UpperCamelCase )
snake_case_ : List[str] = type(__UpperCamelCase ).__name__
snake_case_ : Optional[int] = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(__UpperCamelCase )
# Save tokenizer based on args
print(F'Adding {tokenizer_class} tokenizer files' )
tokenizer.save_pretrained(__UpperCamelCase )
# Store the state_dict to file.
snake_case_ : List[Any] = os.path.join(__UpperCamelCase , """pytorch_model.bin""" )
print(F'Saving checkpoint to "{output_checkpoint_file}"' )
torch.save(__UpperCamelCase , __UpperCamelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 21 | 0 |
from scipy.stats import spearmanr
import datasets
__lowerCAmelCase : Optional[Any] = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
__lowerCAmelCase : int = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
__lowerCAmelCase : List[Any] = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase=False ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Any = spearmanr(_lowercase , _lowercase )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 704 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : int = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : int = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : int = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : List[Any] = tf.cast(math.pi , x.dtype )
snake_case_ : int = tf.cast(0.044_715 , x.dtype )
snake_case_ : Optional[int] = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(__UpperCamelCase , 3 )) ))
return x * cdf
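# The variant above is the tanh approximation of GELU:
# 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x**3)))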
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : Optional[Any] = tf.convert_to_tensor(__UpperCamelCase )
return x * tf.tanh(tf.math.softplus(__UpperCamelCase ) )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : int = tf.cast(0.044_715 , x.dtype )
snake_case_ : Optional[int] = tf.cast(0.7_978_845_608 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : Tuple = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : str = tf.cast(1.702 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return tf.clip_by_value(_gelu(__UpperCamelCase ) , -1_0 , 1_0 )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str]=-1 ):
'''simple docstring'''
snake_case_ , snake_case_ : List[Any] = tf.split(__UpperCamelCase , 2 , axis=__UpperCamelCase )
return a * tf.math.sigmoid(__UpperCamelCase )
if version.parse(tf.version.VERSION) >= version.parse('''2.4'''):
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
return tf.keras.activations.gelu(__UpperCamelCase , approximate=__UpperCamelCase )
__lowerCAmelCase : int = tf.keras.activations.gelu
__lowerCAmelCase : Optional[Any] = approximate_gelu_wrap
else:
__lowerCAmelCase : List[Any] = _gelu
__lowerCAmelCase : Any = _gelu_new
__lowerCAmelCase : Dict = {
'''gelu''': gelu,
'''gelu_10''': gelu_aa,
'''gelu_fast''': gelu_fast,
'''gelu_new''': gelu_new,
'''glu''': glu,
'''mish''': mish,
'''quick_gelu''': quick_gelu,
'''relu''': tf.keras.activations.relu,
'''sigmoid''': tf.keras.activations.sigmoid,
'''silu''': tf.keras.activations.swish,
'''swish''': tf.keras.activations.swish,
'''tanh''': tf.keras.activations.tanh,
}
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F'function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}' )
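# Minimal usage sketch (illustrative; requires TensorFlow at runtime):
# ACTaFN["gelu_new"](tf.constant([-1.0, 0.0, 1.0])) applies the smooth
# GELU non-linearity element-wise.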
| 21 | 0 |
"""simple docstring"""
__lowerCAmelCase : List[str] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__lowerCAmelCase : List[str] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__lowerCAmelCase : Union[str, Any] = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 705 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : int = [0] * len(__UpperCamelCase )
snake_case_ : List[str] = []
snake_case_ : Any = [1] * len(__UpperCamelCase )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__UpperCamelCase ) ):
if indegree[i] == 0:
queue.append(__UpperCamelCase )
while queue:
snake_case_ : Optional[int] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
snake_case_ : Union[str, Any] = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(__UpperCamelCase )
print(max(__UpperCamelCase ) )
# Adjacency list of Graph
__lowerCAmelCase : str = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
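# For the adjacency list above this prints 5: the longest path, counted in
# vertices, is 0 -> 2 -> 5 -> 6 -> 7.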
| 21 | 0 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : bytes ):
'''simple docstring'''
return "".join([hex(__UpperCamelCase )[2:].zfill(2 ).upper() for byte in list(__UpperCamelCase )] )
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
if (len(__UpperCamelCase ) % 2) != 0:
raise ValueError(
"""Base16 encoded data is invalid:
Data does not have an even number of hex digits.""" )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(__UpperCamelCase ) <= set("""0123456789ABCDEF""" ):
raise ValueError(
"""Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.""" )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 1_6 ) for i in range(0 , len(__UpperCamelCase ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
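# Round-trip example for the pair above: encoding b"HELLO" yields "48454C4C4F",
# and decoding "48454C4C4F" returns b"HELLO".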
| 706 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
    snake_case_ : Any = 1 # holds the calculated value of C(n, k)
# Since C(n, k) = C(n, n-k)
if k > (n - k):
snake_case_ : Optional[int] = n - k
# Calculate C(n,k)
for i in range(__UpperCamelCase ):
result *= n - i
result //= i + 1
return result
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return binomial_coefficient(2 * node_count , __UpperCamelCase ) // (node_count + 1)
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
if n < 0:
raise ValueError("""factorial() not defined for negative values""" )
snake_case_ : Optional[int] = 1
for i in range(1 , n + 1 ):
result *= i
return result
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return catalan_number(__UpperCamelCase ) * factorial(__UpperCamelCase )
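# e.g. with 3 nodes: catalan_number(3) == 5 binary search trees, and
# 5 * 3! == 30 distinct binary trees.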
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] = int(input('''Enter the number of nodes: ''').strip() or 0)
if node_count <= 0:
raise ValueError('''We need some nodes to work with.''')
print(
F'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
F'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
| 21 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
__lowerCAmelCase : Dict = logging.get_logger(__name__)
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , *_lowercase , **_lowercase ) -> None:
'''simple docstring'''
warnings.warn(
"""The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use SegformerImageProcessor instead.""" , _lowercase , )
super().__init__(*_lowercase , **_lowercase )
| 707 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : Dict = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''swin'''
_lowerCamelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _lowercase=2_2_4 , _lowercase=4 , _lowercase=3 , _lowercase=9_6 , _lowercase=[2, 2, 6, 2] , _lowercase=[3, 6, 1_2, 2_4] , _lowercase=7 , _lowercase=4.0 , _lowercase=True , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.1 , _lowercase="gelu" , _lowercase=False , _lowercase=0.02 , _lowercase=1E-5 , _lowercase=3_2 , _lowercase=None , _lowercase=None , **_lowercase , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : str = image_size
snake_case_ : int = patch_size
snake_case_ : Optional[int] = num_channels
snake_case_ : Union[str, Any] = embed_dim
snake_case_ : Optional[int] = depths
snake_case_ : Optional[int] = len(_lowercase )
snake_case_ : Optional[Any] = num_heads
snake_case_ : Optional[Any] = window_size
snake_case_ : Optional[Any] = mlp_ratio
snake_case_ : Optional[Any] = qkv_bias
snake_case_ : Optional[Any] = hidden_dropout_prob
snake_case_ : Tuple = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = drop_path_rate
snake_case_ : List[Any] = hidden_act
snake_case_ : str = use_absolute_embeddings
snake_case_ : str = layer_norm_eps
snake_case_ : Optional[Any] = initializer_range
snake_case_ : Any = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case_ : Tuple = int(embed_dim * 2 ** (len(_lowercase ) - 1) )
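        # e.g. the default embed_dim=96 with four stages gives 96 * 2**3 = 768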
snake_case_ : Tuple = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(_lowercase ) + 1 )]
snake_case_ , snake_case_ : Any = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = version.parse('''1.11''' )
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCAmelCase__ ( self ) -> float:
'''simple docstring'''
return 1E-4
| 21 | 0 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
__lowerCAmelCase : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
__lowerCAmelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase]
__lowerCAmelCase : set[int] = {ord(char) for char in VALID_CHARS}
__lowerCAmelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def __lowerCAmelCase ( __UpperCamelCase : list[int] , __UpperCamelCase : tuple[int, ...] ):
'''simple docstring'''
snake_case_ : str = ""
snake_case_ : int
snake_case_ : int
snake_case_ : int
for keychar, cipherchar in zip(cycle(__UpperCamelCase ) , __UpperCamelCase ):
snake_case_ : List[str] = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(__UpperCamelCase )
return decoded
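# Why a single pass works: XOR with a fixed key is its own inverse, so try_key
# decrypts with exactly the operation that encrypted. Worked example:
#   ord("a") == 97; 97 ^ 42 == 75; 75 ^ 42 == 97.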
def __lowerCAmelCase ( __UpperCamelCase : list[int] ):
'''simple docstring'''
snake_case_ : list[str] = []
for key in product(__UpperCamelCase , repeat=3 ):
snake_case_ : Dict = try_key(__UpperCamelCase , __UpperCamelCase )
if encoded is not None:
possibles.append(__UpperCamelCase )
return possibles
def __lowerCAmelCase ( __UpperCamelCase : list[str] , __UpperCamelCase : str ):
'''simple docstring'''
return [possible for possible in possibles if common_word in possible.lower()]
def __lowerCAmelCase ( __UpperCamelCase : str = "p059_cipher.txt" ):
'''simple docstring'''
snake_case_ : list[int]
snake_case_ : list[str]
snake_case_ : str
snake_case_ : str
snake_case_ : str = Path(__UpperCamelCase ).parent.joinpath(__UpperCamelCase ).read_text(encoding="""utf-8""" )
snake_case_ : int = [int(__UpperCamelCase ) for number in data.strip().split(""",""" )]
snake_case_ : Tuple = filter_valid_chars(__UpperCamelCase )
for common_word in COMMON_WORDS:
snake_case_ : Optional[int] = filter_common_word(__UpperCamelCase , __UpperCamelCase )
if len(__UpperCamelCase ) == 1:
break
snake_case_ : Optional[int] = possibles[0]
return sum(ord(__UpperCamelCase ) for char in decoded_text )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 708 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : int = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" )
snake_case_ : List[Any] = AutoTokenizer.from_pretrained("""google/mt5-small""" )
snake_case_ : Dict = tokenizer("""Hello there""" , return_tensors="""np""" ).input_ids
snake_case_ : List[str] = tokenizer("""Hi I am""" , return_tensors="""np""" ).input_ids
snake_case_ : Optional[Any] = shift_tokens_right(_lowercase , model.config.pad_token_id , model.config.decoder_start_token_id )
snake_case_ : Tuple = model(_lowercase , decoder_input_ids=_lowercase ).logits
snake_case_ : Tuple = optax.softmax_cross_entropy(_lowercase , onehot(_lowercase , logits.shape[-1] ) ).mean()
snake_case_ : List[str] = -(labels.shape[-1] * loss.item())
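        # The mean per-token cross-entropy times the sequence length gives the total
        # negative log-likelihood; negating it yields the sequence log-likelihood
        # that is compared against the reference score below.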
snake_case_ : Optional[int] = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 21 | 0 |
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] = TypeVar('''DatasetType''', Dataset, IterableDataset)
def __lowerCAmelCase ( __UpperCamelCase : List[DatasetType] , __UpperCamelCase : Optional[List[float]] = None , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[DatasetInfo] = None , __UpperCamelCase : Optional[NamedSplit] = None , __UpperCamelCase : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ):
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("""Unable to interleave an empty list of datasets.""" )
for i, dataset in enumerate(__UpperCamelCase ):
if not isinstance(__UpperCamelCase , (Dataset, IterableDataset) ):
if isinstance(__UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
"""is an empty dataset dictionary.""" )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(__UpperCamelCase )}\n'
F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(__UpperCamelCase ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCamelCase ).__name__}.' )
if i == 0:
snake_case_ : Optional[Any] = (
(Dataset, IterableDataset) if isinstance(__UpperCamelCase , __UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise ValueError(
F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , info=__UpperCamelCase , split=__UpperCamelCase , stopping_strategy=__UpperCamelCase )
else:
return _interleave_iterable_datasets(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , info=__UpperCamelCase , split=__UpperCamelCase , stopping_strategy=__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : List[DatasetType] , __UpperCamelCase : Optional[DatasetInfo] = None , __UpperCamelCase : Optional[NamedSplit] = None , __UpperCamelCase : int = 0 , ):
'''simple docstring'''
if not dsets:
raise ValueError("""Unable to concatenate an empty list of datasets.""" )
for i, dataset in enumerate(__UpperCamelCase ):
if not isinstance(__UpperCamelCase , (Dataset, IterableDataset) ):
if isinstance(__UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
"""is an empty dataset dictionary.""" )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(__UpperCamelCase )}\n'
                F'Please pick one to concatenate with the other datasets, for example: dataset[\'{next(iter(__UpperCamelCase ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCamelCase ).__name__}.' )
if i == 0:
snake_case_ : List[Any] = (
(Dataset, IterableDataset) if isinstance(__UpperCamelCase , __UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise ValueError(
            F'Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(__UpperCamelCase , info=__UpperCamelCase , split=__UpperCamelCase , axis=__UpperCamelCase )
else:
return _concatenate_iterable_datasets(__UpperCamelCase , info=__UpperCamelCase , split=__UpperCamelCase , axis=__UpperCamelCase )
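# Usage sketch (hypothetical call sites; upstream `datasets` exposes these two
# helpers as `interleave_datasets` and `concatenate_datasets`):
#
#   from datasets import Dataset
#   ds_a = Dataset.from_dict({"x": [0, 1, 2]})
#   ds_b = Dataset.from_dict({"x": [10, 11, 12]})
#   mixed = interleave_datasets([ds_a, ds_b], probabilities=[0.5, 0.5], seed=42)
#   joined = concatenate_datasets([ds_a, ds_b])  # six rows, single column "x"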
| 709 |
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 21 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''longformer'''
def __init__( self , _lowercase = 5_1_2 , _lowercase = 2 , _lowercase = 1 , _lowercase = 0 , _lowercase = 2 , _lowercase = 3_0_5_2_2 , _lowercase = 7_6_8 , _lowercase = 1_2 , _lowercase = 1_2 , _lowercase = 3_0_7_2 , _lowercase = "gelu" , _lowercase = 0.1 , _lowercase = 0.1 , _lowercase = 5_1_2 , _lowercase = 2 , _lowercase = 0.02 , _lowercase = 1E-12 , _lowercase = False , **_lowercase , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=_lowercase , **_lowercase )
snake_case_ : str = attention_window
snake_case_ : int = sep_token_id
snake_case_ : Dict = bos_token_id
snake_case_ : List[Any] = eos_token_id
snake_case_ : List[Any] = vocab_size
snake_case_ : int = hidden_size
snake_case_ : str = num_hidden_layers
snake_case_ : Any = num_attention_heads
snake_case_ : List[str] = hidden_act
snake_case_ : str = intermediate_size
snake_case_ : List[Any] = hidden_dropout_prob
snake_case_ : Optional[int] = attention_probs_dropout_prob
snake_case_ : Dict = max_position_embeddings
snake_case_ : Optional[int] = type_vocab_size
snake_case_ : Tuple = initializer_range
snake_case_ : List[Any] = layer_norm_eps
snake_case_ : Tuple = onnx_export
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase = "default" , _lowercase = None ) -> List[Any]:
'''simple docstring'''
super().__init__(_lowercase , _lowercase , _lowercase )
snake_case_ : List[Any] = True
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
snake_case_ : Tuple = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case_ : str = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""global_attention_mask""", dynamic_axis),
] )
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
snake_case_ : List[Any] = super().outputs
if self.task == "default":
snake_case_ : Optional[int] = {0: """batch"""}
return outputs
@property
def UpperCAmelCase__ ( self ) -> float:
'''simple docstring'''
return 1E-4
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return max(super().default_onnx_opset , 1_4 )
def UpperCAmelCase__ ( self , _lowercase , _lowercase = -1 , _lowercase = -1 , _lowercase = False , _lowercase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = super().generate_dummy_inputs(
preprocessor=_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
        inputs["""global_attention_mask"""] = torch.zeros_like(inputs["""input_ids"""] )
        # make every second token global
        inputs["""global_attention_mask"""][:, ::2] = 1
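        # e.g. for a sequence of length 8 each row becomes [1, 0, 1, 0, 1, 0, 1, 0].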
return inputs
| 710 |
"""simple docstring"""
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__lowerCAmelCase : List[str] = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
__lowerCAmelCase : Optional[Any] = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
__lowerCAmelCase : str = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGLUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
        - for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
        \'matthews_correlation\': Matthews Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Any ):
'''simple docstring'''
return float((preds == labels).mean() )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : str="binary" ):
'''simple docstring'''
snake_case_ : Optional[Any] = simple_accuracy(__UpperCamelCase , __UpperCamelCase )
snake_case_ : Dict = float(fa_score(y_true=__UpperCamelCase , y_pred=__UpperCamelCase , average=__UpperCamelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : List[Any] = {}
for id_pred, label in zip(__UpperCamelCase , __UpperCamelCase ):
snake_case_ : Optional[int] = F'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
snake_case_ : Union[str, Any] = id_pred["""prediction"""]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
snake_case_ : str = [(pred, label)]
snake_case_ , snake_case_ : List[str] = [], []
for question, preds_labels in question_map.items():
snake_case_ , snake_case_ : Optional[Any] = zip(*__UpperCamelCase )
snake_case_ : int = fa_score(y_true=__UpperCamelCase , y_pred=__UpperCamelCase , average="""macro""" )
fas.append(__UpperCamelCase )
snake_case_ : Dict = int(sum(pred == label for pred, label in preds_labels ) == len(__UpperCamelCase ) )
ems.append(__UpperCamelCase )
snake_case_ : Optional[int] = float(sum(__UpperCamelCase ) / len(__UpperCamelCase ) )
snake_case_ : Any = sum(__UpperCamelCase ) / len(__UpperCamelCase )
snake_case_ : int = float(fa_score(y_true=__UpperCamelCase , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> List[str]:
'''simple docstring'''
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_lowercase , _lowercase )}
elif self.config_name == "cb":
return acc_and_fa(_lowercase , _lowercase , fa_avg="""macro""" )
elif self.config_name == "record":
snake_case_ : Optional[Any] = [
{
"""qas""": [
{"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
for ref in references
]
}
]
snake_case_ : Dict = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
return evaluate_record(_lowercase , _lowercase )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_lowercase , _lowercase )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_lowercase , _lowercase )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 21 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=9_9 , _lowercase=3_2 , _lowercase=5 , _lowercase=4 , _lowercase=3_7 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=1_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=4 , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = parent
snake_case_ : Dict = batch_size
snake_case_ : Any = seq_length
snake_case_ : Tuple = is_training
snake_case_ : Dict = use_attention_mask
snake_case_ : int = use_token_type_ids
snake_case_ : List[Any] = use_labels
snake_case_ : List[str] = vocab_size
snake_case_ : str = hidden_size
snake_case_ : Optional[Any] = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : Any = intermediate_size
snake_case_ : Optional[Any] = hidden_act
snake_case_ : Tuple = hidden_dropout_prob
snake_case_ : List[Any] = attention_probs_dropout_prob
snake_case_ : int = max_position_embeddings
snake_case_ : Dict = type_vocab_size
snake_case_ : Dict = type_sequence_label_size
snake_case_ : Optional[Any] = initializer_range
snake_case_ : Tuple = num_choices
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Tuple = None
if self.use_attention_mask:
snake_case_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : List[str] = None
if self.use_token_type_ids:
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Tuple = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Any = self.prepare_config_and_inputs()
        snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = config_and_inputs
snake_case_ : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Tuple = self.prepare_config_and_inputs()
        snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = config_and_inputs
snake_case_ : Union[str, Any] = True
snake_case_ : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
snake_case_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = True
_lowerCamelCase = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Any = FlaxRobertaPreLayerNormModelTester(self )
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case_ : Tuple = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : List[str] = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
snake_case_ : str = model(_lowercase )[0]
snake_case_ : int = [1, 1_1, 5_0_2_6_5]
self.assertEqual(list(output.shape ) , _lowercase )
# compare the actual values for a slice.
snake_case_ : Tuple = np.array(
[[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Any = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : Dict = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
snake_case_ : Any = model(_lowercase )[0]
# compare the actual values for a slice.
snake_case_ : Optional[Any] = np.array(
[[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
| 711 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=9_9 , _lowercase=3_2 , _lowercase=5 , _lowercase=4 , _lowercase=3_7 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=1_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=4 , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = parent
snake_case_ : Dict = batch_size
snake_case_ : Any = seq_length
snake_case_ : Tuple = is_training
snake_case_ : Dict = use_attention_mask
snake_case_ : int = use_token_type_ids
snake_case_ : List[Any] = use_labels
snake_case_ : List[str] = vocab_size
snake_case_ : str = hidden_size
snake_case_ : Optional[Any] = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : Any = intermediate_size
snake_case_ : Optional[Any] = hidden_act
snake_case_ : Tuple = hidden_dropout_prob
snake_case_ : List[Any] = attention_probs_dropout_prob
snake_case_ : int = max_position_embeddings
snake_case_ : Dict = type_vocab_size
snake_case_ : Dict = type_sequence_label_size
snake_case_ : Optional[Any] = initializer_range
snake_case_ : Tuple = num_choices
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Tuple = None
if self.use_attention_mask:
snake_case_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : List[str] = None
if self.use_token_type_ids:
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Tuple = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Any = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = config_and_inputs
snake_case_ : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Tuple = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = config_and_inputs
snake_case_ : Union[str, Any] = True
snake_case_ : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
snake_case_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = True
_lowerCamelCase = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Any = FlaxRobertaPreLayerNormModelTester(self )
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case_ : Tuple = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : List[str] = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
snake_case_ : str = model(_lowercase )[0]
snake_case_ : int = [1, 1_1, 5_0_2_6_5]
self.assertEqual(list(output.shape ) , _lowercase )
# compare the actual values for a slice.
snake_case_ : Tuple = np.array(
[[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Any = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : Dict = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
snake_case_ : Any = model(_lowercase )[0]
# compare the actual values for a slice.
snake_case_ : Optional[Any] = np.array(
[[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
| 21 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
__lowerCAmelCase : List[Any] = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_trocr"""] = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
__lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 712 |
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
__lowerCAmelCase : Optional[Any] = parser.parse_args()
__lowerCAmelCase : Dict = '''cpu'''
__lowerCAmelCase : Optional[Any] = '''a lovely <dicoo> in red dress and hat, in the snowy and bright night, with many brightly lit buildings'''
__lowerCAmelCase : Tuple = '''path-to-your-trained-model'''
__lowerCAmelCase : List[Any] = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
__lowerCAmelCase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
__lowerCAmelCase : List[Any] = pipe.to(device)
# to channels last
__lowerCAmelCase : Optional[Any] = pipe.unet.to(memory_format=torch.channels_last)
__lowerCAmelCase : List[str] = pipe.vae.to(memory_format=torch.channels_last)
__lowerCAmelCase : Optional[Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
__lowerCAmelCase : Dict = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
__lowerCAmelCase : Tuple = torch.randn(2, 4, 64, 64)
__lowerCAmelCase : Any = torch.rand(1) * 999
__lowerCAmelCase : List[str] = torch.randn(2, 77, 768)
__lowerCAmelCase : Optional[int] = (sample, timestep, encoder_hidden_status)
try:
__lowerCAmelCase : List[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
__lowerCAmelCase : Optional[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
__lowerCAmelCase : Any = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
__lowerCAmelCase : int = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
__lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
__lowerCAmelCase : List[str] = 666
__lowerCAmelCase : Optional[int] = torch.Generator(device).manual_seed(seed)
__lowerCAmelCase : List[Any] = {'''generator''': generator}
if args.steps is not None:
__lowerCAmelCase : Any = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
__lowerCAmelCase : str = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
| 21 | 0 |
"""simple docstring"""
__lowerCAmelCase : Tuple = 0 # The first color of the flag.
__lowerCAmelCase : List[Any] = 1 # The second color of the flag.
__lowerCAmelCase : int = 2 # The third color of the flag.
__lowerCAmelCase : Optional[int] = (red, white, blue)
def __lowerCAmelCase ( __UpperCamelCase : list ):
'''simple docstring'''
if not sequence:
return []
if len(__UpperCamelCase ) == 1:
return list(__UpperCamelCase )
snake_case_ : Dict = 0
snake_case_ : Optional[Any] = len(__UpperCamelCase ) - 1
snake_case_ : int = 0
while mid <= high:
if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
high -= 1
else:
            snake_case_ : List[Any] = F'The elements inside the sequence must contain only {colors} values'
raise ValueError(__UpperCamelCase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : Optional[Any] = input('''Enter numbers separated by commas:\n''').strip()
__lowerCAmelCase : int = [int(item.strip()) for item in user_input.split(''',''')]
print(F'''{dutch_national_flag_sort(unsorted)}''')
| 713 |
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = RoFormerTokenizer
_lowerCamelCase = RoFormerTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = True
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().setUp()
def UpperCAmelCase__ ( self , **_lowercase ) -> str:
'''simple docstring'''
return self.tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **_lowercase )
def UpperCAmelCase__ ( self , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **_lowercase )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : Tuple = """永和服装饰品有限公司,今天天气非常好"""
snake_case_ : int = """永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"""
return input_text, output_text
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : List[str] = self.get_tokenizer()
snake_case_ , snake_case_ : Optional[Any] = self.get_chinese_input_output_texts()
snake_case_ : List[str] = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , output_text.split() )
snake_case_ : str = tokens + [tokenizer.unk_token]
snake_case_ : Tuple = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : int = self.get_rust_tokenizer()
snake_case_ , snake_case_ : List[Any] = self.get_chinese_input_output_texts()
snake_case_ : Union[str, Any] = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , output_text.split() )
snake_case_ : Optional[int] = tokens + [tokenizer.unk_token]
snake_case_ : Union[str, Any] = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
| 21 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] = {
'''tanreinama/GPTSAN-2.8B-spout_is_uniform''': (
'''https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'''
),
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''gptsan-japanese'''
_lowerCamelCase = [
'''past_key_values''',
]
_lowerCamelCase = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _lowercase=3_6_0_0_0 , _lowercase=1_2_8_0 , _lowercase=1_0_2_4 , _lowercase=8_1_9_2 , _lowercase=4_0_9_6 , _lowercase=1_2_8 , _lowercase=1_0 , _lowercase=0 , _lowercase=1_6 , _lowercase=1_6 , _lowercase=1_2_8 , _lowercase=0.0 , _lowercase=1E-5 , _lowercase=False , _lowercase=0.0 , _lowercase="float32" , _lowercase=False , _lowercase=False , _lowercase=False , _lowercase=0.002 , _lowercase=False , _lowercase=True , _lowercase=3_5_9_9_8 , _lowercase=3_5_9_9_5 , _lowercase=3_5_9_9_9 , **_lowercase , ) -> int:
'''simple docstring'''
snake_case_ : Any = vocab_size
snake_case_ : int = max_position_embeddings
snake_case_ : List[Any] = d_model
snake_case_ : Any = d_ff
snake_case_ : List[str] = d_ext
snake_case_ : str = d_spout
snake_case_ : str = num_switch_layers
snake_case_ : Optional[Any] = num_ext_layers
snake_case_ : Optional[Any] = num_switch_layers + num_ext_layers
snake_case_ : List[str] = num_heads
snake_case_ : Any = num_experts
snake_case_ : List[str] = expert_capacity
snake_case_ : Dict = dropout_rate
snake_case_ : Optional[Any] = layer_norm_epsilon
snake_case_ : List[Any] = router_bias
snake_case_ : Tuple = router_jitter_noise
snake_case_ : str = router_dtype
snake_case_ : Tuple = router_ignore_padding_tokens
snake_case_ : str = output_hidden_states
snake_case_ : Optional[Any] = output_attentions
snake_case_ : str = initializer_factor
snake_case_ : Tuple = output_router_logits
snake_case_ : List[Any] = use_cache
super().__init__(
separator_token_id=_lowercase , pad_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase , )
| 714 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 1_0 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
snake_case_ : List[Any] = [
2_0_4_7,
1_3_7_3_6_5_3,
2_5_3_2_6_0_0_1,
3_2_1_5_0_3_1_7_5_1,
2_1_5_2_3_0_2_8_9_8_7_4_7,
3_4_7_4_7_4_9_6_6_0_3_8_3,
3_4_1_5_5_0_0_7_1_7_2_8_3_2_1,
1,
3_8_2_5_1_2_3_0_5_6_5_4_6_4_1_3_0_5_1,
1,
1,
3_1_8_6_6_5_8_5_7_8_3_4_0_3_1_1_5_1_1_6_7_4_6_1,
3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1,
]
snake_case_ : Dict = [2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9, 3_1, 3_7, 4_1]
for idx, _p in enumerate(__UpperCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
snake_case_ : Optional[int] = primes[:idx]
break
snake_case_ , snake_case_ : Tuple = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
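    # e.g. n = 221: n - 1 = 220 = 55 * 2 ** 2, so d = 55 and s = 2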
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
snake_case_ : List[str] = False
for r in range(__UpperCamelCase ):
snake_case_ : int = pow(__UpperCamelCase , d * 2**r , __UpperCamelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
snake_case_ : Optional[Any] = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def __lowerCAmelCase ( ):
'''simple docstring'''
assert not miller_rabin(5_6_1 )
assert miller_rabin(5_6_3 )
# 2047
assert not miller_rabin(8_3_8_2_0_1 )
assert miller_rabin(8_3_8_2_0_7 )
# 1_373_653
assert not miller_rabin(1_7_3_1_6_0_0_1 )
assert miller_rabin(1_7_3_1_6_0_1_7 )
# 25_326_001
assert not miller_rabin(3_0_7_8_3_8_6_6_4_1 )
assert miller_rabin(3_0_7_8_3_8_6_6_5_3 )
# 3_215_031_751
assert not miller_rabin(1_7_1_3_0_4_5_5_7_4_8_0_1 )
assert miller_rabin(1_7_1_3_0_4_5_5_7_4_8_1_9 )
# 2_152_302_898_747
assert not miller_rabin(2_7_7_9_7_9_9_7_2_8_3_0_7 )
assert miller_rabin(2_7_7_9_7_9_9_7_2_8_3_2_7 )
# 3_474_749_660_383
assert not miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_4_4_1 )
assert miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_5_2_7 )
# 341_550_071_728_321
assert not miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_5_1 )
assert miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_9_1 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_8_6_7 )
assert miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_9_5_1 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_3_3 )
assert miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_5_9 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 21 | 0 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Dict = {
"""repo_name""": ["""test_repo1""", """test_repo2""", """test_repo3"""],
"""path""": ["""test_1.py""", """test_2.py""", """unit_test.py"""],
"""content""": ["""a """ * 2_0, """a """ * 3_0, """b """ * 7],
}
snake_case_ : Union[str, Any] = Dataset.from_dict(__UpperCamelCase )
return dataset
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : List[Any] = get_dataset()
snake_case_ : Optional[Any] = make_duplicate_clusters(_lowercase , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : List[Any] = get_dataset()
snake_case_ : Union[str, Any] = deduplicate_dataset(_lowercase )
self.assertEqual(len(_lowercase ) , 2 )
print(_lowercase )
self.assertEqual(duplicate_clusters[0][0]["""copies"""] , 2 )
self.assertEqual(duplicate_clusters[0][0]["""is_extreme"""] , _lowercase )
| 715 |
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
def is_in_circle(__UpperCamelCase : float , __UpperCamelCase : float ) -> bool:
snake_case_ : Dict = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
snake_case_ : Tuple = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(__UpperCamelCase ) )
# The ratio of the area for circle to square is pi/4.
snake_case_ : Union[str, Any] = proportion * 4
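    # Accuracy note: each throw is a Bernoulli(pi / 4) trial, so the estimator's
    # standard deviation is 4 * sqrt(p * (1 - p) / N) with p = pi / 4 -- roughly
    # 1.64 / sqrt(N), i.e. about 1.6e-3 for 10 ** 6 throws.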
print(F'The estimated value of pi is {pi_estimate}' )
print(F'The numpy value of pi is {pi}' )
print(F'The total error is {abs(pi - pi_estimate )}' )
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Callable[[float], float] , __UpperCamelCase : float = 0.0 , __UpperCamelCase : float = 1.0 , ):
'''simple docstring'''
return mean(
function_to_integrate(uniform(__UpperCamelCase , __UpperCamelCase ) ) for _ in range(__UpperCamelCase ) ) * (max_value - min_value)
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : float = 0.0 , __UpperCamelCase : float = 1.0 ):
'''simple docstring'''
def identity_function(__UpperCamelCase : float ) -> float:
return x
snake_case_ : int = area_under_curve_estimator(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ : str = (max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {expected_value}' )
print(F'Total error is {abs(estimated_value - expected_value )}' )
print("""******************""" )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
def function_to_integrate(__UpperCamelCase : float ) -> float:
return sqrt(4.0 - x * x )
snake_case_ : List[Any] = area_under_curve_estimator(
__UpperCamelCase , __UpperCamelCase , 0.0 , 2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {pi}' )
print(F'Total error is {abs(estimated_value - pi )}' )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 0 |
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 1_0 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
snake_case_ : List[Any] = [
2_0_4_7,
1_3_7_3_6_5_3,
2_5_3_2_6_0_0_1,
3_2_1_5_0_3_1_7_5_1,
2_1_5_2_3_0_2_8_9_8_7_4_7,
3_4_7_4_7_4_9_6_6_0_3_8_3,
3_4_1_5_5_0_0_7_1_7_2_8_3_2_1,
1,
3_8_2_5_1_2_3_0_5_6_5_4_6_4_1_3_0_5_1,
1,
1,
3_1_8_6_6_5_8_5_7_8_3_4_0_3_1_1_5_1_1_6_7_4_6_1,
3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1,
]
snake_case_ : Dict = [2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9, 3_1, 3_7, 4_1]
for idx, _p in enumerate(__UpperCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
snake_case_ : Optional[int] = primes[:idx]
break
    snake_case_ , snake_case_ : Tuple = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
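    # e.g. n = 221: n - 1 = 220 = 55 * 2 ** 2, so d = 55 and s = 2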
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
snake_case_ : List[str] = False
for r in range(__UpperCamelCase ):
snake_case_ : int = pow(__UpperCamelCase , d * 2**r , __UpperCamelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
snake_case_ : Optional[Any] = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def __lowerCAmelCase ( ):
'''simple docstring'''
assert not miller_rabin(5_6_1 )
assert miller_rabin(5_6_3 )
# 2047
assert not miller_rabin(8_3_8_2_0_1 )
assert miller_rabin(8_3_8_2_0_7 )
# 1_373_653
assert not miller_rabin(1_7_3_1_6_0_0_1 )
assert miller_rabin(1_7_3_1_6_0_1_7 )
# 25_326_001
assert not miller_rabin(3_0_7_8_3_8_6_6_4_1 )
assert miller_rabin(3_0_7_8_3_8_6_6_5_3 )
# 3_215_031_751
assert not miller_rabin(1_7_1_3_0_4_5_5_7_4_8_0_1 )
assert miller_rabin(1_7_1_3_0_4_5_5_7_4_8_1_9 )
# 2_152_302_898_747
assert not miller_rabin(2_7_7_9_7_9_9_7_2_8_3_0_7 )
assert miller_rabin(2_7_7_9_7_9_9_7_2_8_3_2_7 )
# 3_474_749_660_383
assert not miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_4_4_1 )
assert miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_5_2_7 )
# 341_550_071_728_321
assert not miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_5_1 )
assert miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_9_1 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_8_6_7 )
assert miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_9_5_1 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_3_3 )
assert miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_5_9 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 716 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = nn.functional.normalize(__UpperCamelCase )
snake_case_ : Tuple = nn.functional.normalize(__UpperCamelCase )
return torch.mm(__UpperCamelCase , normalized_text_embeds.t() )
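# Shape note: with image_embeds of shape (batch, dim) and text_embeds of shape
# (num_concepts, dim), the result is a (batch, num_concepts) matrix of cosine
# similarities in [-1, 1].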
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = CLIPConfig
_lowerCamelCase = ['''CLIPEncoderLayer''']
def __init__( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Tuple = CLIPVisionModel(config.vision_config )
snake_case_ : int = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_lowercase )
snake_case_ : Optional[Any] = nn.Parameter(torch.ones(1_7 , config.projection_dim ) , requires_grad=_lowercase )
snake_case_ : Dict = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=_lowercase )
snake_case_ : Any = nn.Parameter(torch.ones(1_7 ) , requires_grad=_lowercase )
snake_case_ : List[str] = nn.Parameter(torch.ones(3 ) , requires_grad=_lowercase )
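        # The four frozen parameters above hold 17 concept embeddings, 3 "special
        # care" embeddings, and one scalar threshold per embedding in each group.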
@torch.no_grad()
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
snake_case_ : int = self.vision_model(_lowercase )[1] # pooled_output
snake_case_ : str = self.visual_projection(_lowercase )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
snake_case_ : Dict = cosine_distance(_lowercase , self.special_care_embeds ).cpu().float().numpy()
snake_case_ : List[str] = cosine_distance(_lowercase , self.concept_embeds ).cpu().float().numpy()
snake_case_ : Any = []
snake_case_ : Any = image_embeds.shape[0]
for i in range(_lowercase ):
snake_case_ : List[Any] = {"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []}
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
snake_case_ : int = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
snake_case_ : List[str] = special_cos_dist[i][concept_idx]
snake_case_ : Union[str, Any] = self.special_care_embeds_weights[concept_idx].item()
snake_case_ : Tuple = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} )
snake_case_ : Dict = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
snake_case_ : int = cos_dist[i][concept_idx]
snake_case_ : List[Any] = self.concept_embeds_weights[concept_idx].item()
snake_case_ : List[str] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(_lowercase )
result.append(_lowercase )
snake_case_ : Union[str, Any] = [len(res["""bad_concepts"""] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = self.vision_model(_lowercase )[1] # pooled_output
snake_case_ : List[str] = self.visual_projection(_lowercase )
snake_case_ : str = cosine_distance(_lowercase , self.special_care_embeds )
snake_case_ : Optional[int] = cosine_distance(_lowercase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
snake_case_ : Tuple = 0.0
snake_case_ : List[Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
snake_case_ : str = torch.any(special_scores > 0 , dim=1 )
snake_case_ : List[str] = special_care * 0.01
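# images that trip any special-care concept get a 0.01 boost on every concept score, i.e. a stricter effective threshold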
snake_case_ : Optional[int] = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
snake_case_ : Optional[Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
snake_case_ : str = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
| 21 | 0 |
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
__lowerCAmelCase : int = TypeVar('''T''')
__lowerCAmelCase : Optional[int] = Union[List[T], Tuple[T, ...]]
__lowerCAmelCase : List[Any] = Union[T, List[T], Dict[str, T]]
__lowerCAmelCase : Dict = Union[str, bytes, os.PathLike]
| 717 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : List[str] = []
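# each tuple pairs a Hugging Face parameter name (left) with its name in the original CvT checkpoint (right); the mapping is consumed when copying weights below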
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
F'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
F'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
F'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
F'stage{idx}.patch_embed.norm.bias',
) )
return embed
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : str = []
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
F'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
F'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', F'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', F'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', F'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', F'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def __lowerCAmelCase ( __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : int = []
token.append((F'cvt.encoder.stages.{idx}.cls_token', """stage2.cls_token""") )
return token
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Union[str, Any] = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = """imagenet-1k-id2label.json"""
snake_case_ : Optional[Any] = 1_0_0_0
snake_case_ : Any = """huggingface/label-files"""
snake_case_ : Tuple = num_labels
snake_case_ : Dict = json.load(open(cached_download(hf_hub_url(__UpperCamelCase , __UpperCamelCase , repo_type="""dataset""" ) ) , """r""" ) )
snake_case_ : str = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
snake_case_ : List[str] = idalabel
snake_case_ : Any = {v: k for k, v in idalabel.items()}
snake_case_ : Dict = CvtConfig(num_labels=__UpperCamelCase , idalabel=__UpperCamelCase , labelaid=__UpperCamelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "13":
snake_case_ : Any = [1, 2, 1_0]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "21":
snake_case_ : Any = [1, 4, 1_6]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
snake_case_ : Optional[int] = [2, 2, 2_0]
snake_case_ : str = [3, 1_2, 1_6]
snake_case_ : Any = [1_9_2, 7_6_8, 1_0_2_4]
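# these wider head counts and embedding dims belong to the w24 branch; the cvt-13/21 checkpoints appear to fall back to the CvtConfig defaults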
snake_case_ : Union[str, Any] = CvtForImageClassification(__UpperCamelCase )
snake_case_ : str = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
snake_case_ : List[Any] = image_size
snake_case_ : str = torch.load(__UpperCamelCase , map_location=torch.device("""cpu""" ) )
snake_case_ : Any = OrderedDict()
snake_case_ : Tuple = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
snake_case_ : Optional[Any] = list_of_state_dict + cls_token(__UpperCamelCase )
snake_case_ : str = list_of_state_dict + embeddings(__UpperCamelCase )
for cnt in range(config.depth[idx] ):
snake_case_ : List[str] = list_of_state_dict + attention(__UpperCamelCase , __UpperCamelCase )
snake_case_ : str = list_of_state_dict + final()
for gg in list_of_state_dict:
print(__UpperCamelCase )
for i in range(len(__UpperCamelCase ) ):
snake_case_ : Union[str, Any] = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
image_processor.save_pretrained(__UpperCamelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Path to the original CvT checkpoint (.pth) file.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__lowerCAmelCase : Dict = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 21 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE__ )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = field(default='''question-answering-extractive''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
_lowerCamelCase = Features({'''question''': Value('''string''' ), '''context''': Value('''string''' )} )
_lowerCamelCase = Features(
{
'''answers''': Sequence(
{
'''text''': Value('''string''' ),
'''answer_start''': Value('''int32''' ),
} )
} )
_lowerCamelCase = '''question'''
_lowerCamelCase = '''context'''
_lowerCamelCase = '''answers'''
@property
def UpperCAmelCase__ ( self ) -> Dict[str, str]:
'''simple docstring'''
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 718 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = MgpstrTokenizer
_lowerCamelCase = False
_lowerCamelCase = {}
_lowerCamelCase = False
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
super().setUp()
# fmt: off
snake_case_ : Optional[Any] = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
snake_case_ : str = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowercase ) + """\n""" )
def UpperCAmelCase__ ( self , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = """tester"""
snake_case_ : Tuple = """tester"""
return input_text, output_text
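# MGP-STR tokenizes at the character level, so the sample text round-trips unchanged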
@unittest.skip("""MGP-STR always lower cases letters.""" )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Dict = self.get_tokenizers(do_lower_case=_lowercase )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
snake_case_ : Optional[Any] = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
snake_case_ : Union[str, Any] = tokenizer.encode([special_token] , add_special_tokens=_lowercase )
self.assertEqual(len(_lowercase ) , 1 )
snake_case_ : List[Any] = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
self.assertTrue(special_token not in decoded )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
snake_case_ , snake_case_ : Union[str, Any] = self.get_input_output_texts(_lowercase )
snake_case_ : List[Any] = tokenizer.tokenize(_lowercase )
snake_case_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(_lowercase )
snake_case_ : Union[str, Any] = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
snake_case_ : List[str] = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertNotEqual(len(_lowercase ) , 0 )
snake_case_ : str = tokenizer.decode(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(text_a.replace(""" """ , """""" ) , _lowercase )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
pass
| 21 | 0 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
__lowerCAmelCase : Optional[Any] = pd.read_csv(
'''https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'''
'''position_salaries.csv'''
)
__lowerCAmelCase : str = dataset.iloc[:, 1:2].values
__lowerCAmelCase : Optional[int] = dataset.iloc[:, 2].values
__lowerCAmelCase : Union[str, Any] = train_test_split(X, y, test_size=0.2, random_state=0)
__lowerCAmelCase : Dict = PolynomialFeatures(degree=4)
__lowerCAmelCase : int = poly_reg.fit_transform(X)
__lowerCAmelCase : List[Any] = LinearRegression()
pol_reg.fit(X_poly, y)
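# PolynomialFeatures(degree=4) expands the single position-level column into [1, x, x**2, x**3, x**4], so the linear model fits a quartic curve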
def viz_polynomial ( ):
'''simple docstring'''
plt.scatter(__UpperCamelCase , __UpperCamelCase , color="""red""" )
plt.plot(__UpperCamelCase , pol_reg.predict(poly_reg.fit_transform(__UpperCamelCase ) ) , color="""blue""" )
plt.title("""Truth or Bluff (Polynomial Regression)""" )
plt.xlabel("""Position level""" )
plt.ylabel("""Salary""" )
plt.show()
if __name__ == "__main__":
viz_polynomial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 719 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=9_9 , _lowercase=3_2 , _lowercase=5 , _lowercase=4 , _lowercase=3_7 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=1_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=4 , ) -> List[str]:
'''simple docstring'''
snake_case_ : Tuple = parent
snake_case_ : List[str] = batch_size
snake_case_ : int = seq_length
snake_case_ : List[Any] = is_training
snake_case_ : Optional[int] = use_attention_mask
snake_case_ : Optional[Any] = use_token_type_ids
snake_case_ : Union[str, Any] = use_labels
snake_case_ : str = vocab_size
snake_case_ : List[str] = hidden_size
snake_case_ : List[str] = num_hidden_layers
snake_case_ : int = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : List[str] = max_position_embeddings
snake_case_ : Union[str, Any] = type_vocab_size
snake_case_ : str = type_sequence_label_size
snake_case_ : Dict = initializer_range
snake_case_ : str = num_choices
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Tuple = None
if self.use_attention_mask:
snake_case_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : Optional[int] = None
if self.use_token_type_ids:
snake_case_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Union[str, Any] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : str = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = config_and_inputs
snake_case_ : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = True
_lowerCamelCase = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = FlaxRoFormerModelTester(self )
@slow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case_ : Tuple = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=_lowercase )
snake_case_ : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
snake_case_ : Tuple = jnp.array([[0, 1, 2, 3, 4, 5]] )
snake_case_ : Dict = model(_lowercase )[0]
snake_case_ : Optional[int] = 5_0_0_0_0
snake_case_ : Union[str, Any] = (1, 6, vocab_size)
self.assertEqual(output.shape , _lowercase )
snake_case_ : Dict = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
| 21 | 0 |
"""simple docstring"""
from __future__ import annotations
def __lowerCAmelCase ( nums : list[float] ):
'''simple docstring'''
if len(nums ) < 2:
raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" )
if any(i <= 0 for i in nums ):
raise ValueError("""All values must be greater than 0""" )
snake_case_ : Tuple = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
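# example: [3, 4, 5] -> True (a valid triangle), [1, 2, 10] -> False (10 is not shorter than 1 + 2)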
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720 |
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : int = 1_0
snake_case_ : Any = datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
snake_case_ : Tuple = datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [9_7], """text""": ["""1976"""]}] * 1_0,
"""id""": list(range(__UpperCamelCase ) ),
} , features=__UpperCamelCase , )
return dataset
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : str = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=__UpperCamelCase )
return filename
# FILE_CONTENT + files
__lowerCAmelCase : List[Any] = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
snake_case_ : Optional[Any] = FILE_CONTENT
with open(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase )
return filename
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
import bza
snake_case_ : Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
snake_case_ : Any = bytes(__UpperCamelCase , """utf-8""" )
with bza.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
import gzip
snake_case_ : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
snake_case_ : List[Any] = bytes(__UpperCamelCase , """utf-8""" )
with gzip.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
import lza.frame
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
snake_case_ : Optional[Any] = bytes(__UpperCamelCase , """utf-8""" )
with lza.frame.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
with pyazr.SevenZipFile(__UpperCamelCase , """w""" ) as archive:
archive.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple ):
'''simple docstring'''
import tarfile
snake_case_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
import lzma
snake_case_ : str = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
snake_case_ : str = bytes(__UpperCamelCase , """utf-8""" )
with lzma.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
import zipfile
snake_case_ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
snake_case_ : Tuple = bytes(__UpperCamelCase , """utf-8""" )
with zstd.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Dict = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
snake_case_ : List[str] = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase )
return filename
__lowerCAmelCase : List[str] = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
__lowerCAmelCase : Tuple = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
__lowerCAmelCase : int = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
__lowerCAmelCase : int = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
__lowerCAmelCase : Any = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
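# the fixtures below serialise these toy rows to csv/json/jsonl/parquet/sqlite files and wrap them in gz/bz2/zip/tar archives for the loader tests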
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Optional[int] = datasets.Dataset.from_dict(__UpperCamelCase )
snake_case_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
with contextlib.closing(sqlitea.connect(__UpperCamelCase ) ) as con:
snake_case_ : Tuple = con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(__UpperCamelCase , """w""" , newline="""""" ) as f:
snake_case_ : Optional[Any] = csv.DictWriter(__UpperCamelCase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(__UpperCamelCase , """w""" , newline="""""" ) as f:
snake_case_ : str = csv.DictWriter(__UpperCamelCase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : int ):
'''simple docstring'''
import bza
snake_case_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(__UpperCamelCase , """rb""" ) as f:
snake_case_ : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(__UpperCamelCase , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
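# the uppercase .CSV arcnames presumably exercise case-insensitive extension detection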
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Any = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
snake_case_ : Any = pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(__UpperCamelCase , """wb""" ) as f:
snake_case_ : Optional[int] = pq.ParquetWriter(__UpperCamelCase , schema=__UpperCamelCase )
snake_case_ : Optional[int] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__UpperCamelCase ) )] for k in DATA[0]} , schema=__UpperCamelCase )
writer.write_table(__UpperCamelCase )
writer.close()
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
snake_case_ : Any = {"""data""": DATA}
with open(__UpperCamelCase , """w""" ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
snake_case_ : List[Any] = {"""data""": DATA_DICT_OF_LISTS}
with open(__UpperCamelCase , """w""" ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
import gzip
snake_case_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(__UpperCamelCase , """rb""" ) as orig_file:
with gzip.open(__UpperCamelCase , """wb""" ) as zipped_file:
zipped_file.writelines(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[str] ):
'''simple docstring'''
import gzip
snake_case_ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(__UpperCamelCase , """rb""" ) as orig_file:
with gzip.open(__UpperCamelCase , """wb""" ) as zipped_file:
zipped_file.writelines(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : List[str] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""nested""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(__UpperCamelCase , arcname=os.path.join("""nested""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : str = ["""0""", """1""", """2""", """3"""]
snake_case_ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : int = ["""0""", """1""", """2""", """3"""]
snake_case_ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : List[Any] = ["""0""", """1""", """2""", """3"""]
snake_case_ : str = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(__UpperCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(__UpperCamelCase , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : List[Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
snake_case_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
return data_dir
| 21 | 0 |
"""simple docstring"""
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Any=0 ):
'''simple docstring'''
if name is None:
snake_case_ : Dict = None
else:
snake_case_ : Dict = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(5_0 - spaces ) + """s}"""
snake_case_ : Any = fmt.format(__UpperCamelCase )
# Print and recurse (if needed).
if isinstance(__UpperCamelCase , __UpperCamelCase ):
if msg is not None:
print(__UpperCamelCase )
for k in val.keys():
recursive_print(__UpperCamelCase , val[k] , spaces + 2 )
elif isinstance(__UpperCamelCase , torch.Tensor ):
print(__UpperCamelCase , """:""" , val.size() )
else:
print(__UpperCamelCase , """:""" , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : Any = param.size()
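# Megatron fuses q, k and v into a single tensor; the two branches below undo the version-specific interleaving of the head and split axes so the result matches the [q; k; v] layout transformers expects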
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
snake_case_ : List[str] = (num_heads, hidden_size, num_splits) + input_shape[1:]
snake_case_ : Tuple = param.view(*__UpperCamelCase )
snake_case_ : Tuple = param.transpose(0 , 2 )
snake_case_ : Any = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
snake_case_ : Optional[Any] = (num_heads, num_splits, hidden_size) + input_shape[1:]
snake_case_ : str = param.view(*__UpperCamelCase )
snake_case_ : Dict = param.transpose(0 , 1 ).contiguous()
snake_case_ : int = param.view(*__UpperCamelCase )
return param
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Dict = {}
# old versions did not store training args
snake_case_ : List[str] = input_state_dict.get("""args""" , __UpperCamelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
snake_case_ : Tuple = ds_args.padded_vocab_size
snake_case_ : Optional[int] = ds_args.max_position_embeddings
snake_case_ : Union[str, Any] = ds_args.hidden_size
snake_case_ : Union[str, Any] = ds_args.num_layers
snake_case_ : str = ds_args.num_attention_heads
snake_case_ : str = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
snake_case_ : Union[str, Any] = config.n_head
# The hidden_size per head.
snake_case_ : Optional[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
snake_case_ : Optional[Any] = input_state_dict["""checkpoint_version"""]
else:
snake_case_ : int = 0.0
# The model.
snake_case_ : List[str] = input_state_dict["""model"""]
# The language model.
snake_case_ : str = model["""language_model"""]
# The embeddings.
snake_case_ : Tuple = lm["""embedding"""]
# The word embeddings.
snake_case_ : List[str] = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
snake_case_ : Optional[int] = word_embeddings[: config.vocab_size, :]
snake_case_ : Optional[int] = word_embeddings
# The position embeddings.
snake_case_ : List[Any] = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
snake_case_ : Tuple = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match' )
# Store the position embeddings.
snake_case_ : Union[str, Any] = pos_embeddings
# The transformer.
snake_case_ : Optional[Any] = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
snake_case_ : Optional[Any] = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
snake_case_ : List[str] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
snake_case_ : int = layer_re.match(__UpperCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
snake_case_ : Tuple = int(m.group(1 ) )
# The name of the operation.
snake_case_ : Any = m.group(2 )
# Is it a weight or a bias?
snake_case_ : Union[str, Any] = m.group(3 )
# The name of the layer.
snake_case_ : str = F'transformer.h.{layer_idx}'
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
snake_case_ : Dict = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
snake_case_ : Optional[int] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
snake_case_ : Optional[Any] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , __UpperCamelCase , __UpperCamelCase )
snake_case_ : List[Any] = causal_mask
# Insert a "dummy" tensor for masked_bias.
snake_case_ : str = torch.tensor(-1E4 , dtype=torch.floataa )
snake_case_ : List[Any] = masked_bias
snake_case_ : Optional[int] = fix_query_key_value_ordering(__UpperCamelCase , __UpperCamelCase , 3 , __UpperCamelCase , __UpperCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
snake_case_ : str = out_val.transpose(0 , 1 ).contiguous()
# Store.
snake_case_ : Tuple = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
snake_case_ : Optional[Any] = fix_query_key_value_ordering(__UpperCamelCase , __UpperCamelCase , 3 , __UpperCamelCase , __UpperCamelCase )
# Store. No change of shape.
snake_case_ : List[Any] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
snake_case_ : Any = megatron_to_transformers[op_name]
snake_case_ : str = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
snake_case_ : List[str] = megatron_to_transformers[op_name]
snake_case_ : Tuple = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
snake_case_ : Dict = transformer["""final_layernorm.weight"""]
snake_case_ : Dict = transformer["""final_layernorm.bias"""]
# For LM head, transformers' wants the matrix to weight embeddings.
snake_case_ : Optional[int] = word_embeddings
# It should be done!
return output_state_dict
def main ( ):
'''simple docstring'''
snake_case_ : List[str] = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" , type=__UpperCamelCase , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
parser.add_argument(
"""--config_file""" , default="""""" , type=__UpperCamelCase , help="""An optional config json file describing the pre-trained model.""" , )
snake_case_ : str = parser.parse_args()
# Extract the basename.
snake_case_ : Optional[Any] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F'Extracting PyTorch state dictionary from {args.path_to_checkpoint}' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
snake_case_ : Optional[int] = torch.load(__UpperCamelCase , map_location="""cpu""" )
else:
snake_case_ : List[Any] = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
snake_case_ : Any = input_state_dict.get("""args""" , __UpperCamelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
snake_case_ : Any = """gelu_fast"""
elif ds_args.openai_gelu:
snake_case_ : Tuple = """gelu_new"""
else:
snake_case_ : List[str] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
snake_case_ : Dict = """gelu_new"""
# Spell out all parameters in case the defaults change.
snake_case_ : List[str] = GPTaConfig(
vocab_size=5_0_2_5_7 , n_positions=1_0_2_4 , n_embd=1_0_2_4 , n_layer=2_4 , n_head=1_6 , n_inner=4_0_9_6 , activation_function=__UpperCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=__UpperCamelCase , summary_activation=__UpperCamelCase , summary_proj_to_labels=__UpperCamelCase , summary_first_dropout=0.1 , scale_attn_weights=__UpperCamelCase , use_cache=__UpperCamelCase , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , )
else:
snake_case_ : List[Any] = GPTaConfig.from_json_file(args.config_file )
snake_case_ : int = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
snake_case_ : Tuple = convert_megatron_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(__UpperCamelCase , __UpperCamelCase )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
snake_case_ : str = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
snake_case_ : Optional[Any] = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
snake_case_ : str = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'Unrecognized tokenizer_type {tokenizer_type}' )
else:
snake_case_ : List[str] = """gpt2"""
snake_case_ : List[Any] = AutoTokenizer.from_pretrained(__UpperCamelCase )
snake_case_ : List[str] = type(__UpperCamelCase ).__name__
snake_case_ : Optional[int] = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(__UpperCamelCase )
# Save tokenizer based on args
print(F'Adding {tokenizer_class} tokenizer files' )
tokenizer.save_pretrained(__UpperCamelCase )
# Store the state_dict to file.
snake_case_ : List[Any] = os.path.join(__UpperCamelCase , """pytorch_model.bin""" )
print(F'Saving checkpoint to "{output_checkpoint_file}"' )
torch.save(__UpperCamelCase , __UpperCamelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 721 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError("""the value of both inputs must be positive""" )
snake_case_ : List[str] = str(bin(__UpperCamelCase ) )[2:] # remove the leading "0b"
snake_case_ : Union[str, Any] = str(bin(__UpperCamelCase ) )[2:] # remove the leading "0b"
snake_case_ : int = max(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(__UpperCamelCase ) , b_binary.zfill(__UpperCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 0 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__lowerCAmelCase : Optional[int] = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = field(default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'''help''': (
'''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `max_length` value of the model configuration.'''
)
} , )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'''help''': (
'''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `num_beams` value of the model configuration.'''
)
} , )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
} , )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = super().to_dict()
for k, v in d.items():
if isinstance(_lowercase , _lowercase ):
snake_case_ : Union[str, Any] = v.to_dict()
return d
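# Illustrative note (not in the source): the to_dict override re-serializes any
# nested GenerationConfig field into a plain dict so the arguments stay
# JSON-serializable for logging and checkpointing.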
| 700 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 0 |
"""simple docstring"""
from __future__ import annotations
def __lowerCAmelCase ( __UpperCamelCase : list , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : int = []
snake_case_ , snake_case_ : Optional[Any] = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
input_list[low : high + 1] = result + left + right
return input_list
def __lowerCAmelCase ( __UpperCamelCase : list ):
'''simple docstring'''
if len(__UpperCamelCase ) <= 1:
return input_list
snake_case_ : Any = list(__UpperCamelCase )
# iteration for two-way merging
snake_case_ : Optional[Any] = 2
while p <= len(__UpperCamelCase ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(__UpperCamelCase ) , __UpperCamelCase ):
snake_case_ : Tuple = i
snake_case_ : List[Any] = i + p - 1
snake_case_ : str = (low + high + 1) // 2
snake_case_ : Union[str, Any] = merge(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# final merge of last two parts
if p * 2 >= len(__UpperCamelCase ):
snake_case_ : Optional[Any] = i
snake_case_ : List[Any] = merge(__UpperCamelCase , 0 , __UpperCamelCase , len(__UpperCamelCase ) - 1 )
break
p *= 2
return input_list
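# Worked example (illustrative, assuming the original name iter_merge_sort):
# for [4, 1, 3, 2] the first pass (p = 2) merges adjacent pairs into [1, 4, 2, 3],
# and the final merge of the two sorted halves yields [1, 2, 3, 4].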
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] = input('''Enter numbers separated by a comma:\n''').strip()
if user_input == "":
__lowerCAmelCase : str = []
else:
__lowerCAmelCase : List[Any] = [int(item.strip()) for item in user_input.split(''',''')]
print(iter_merge_sort(unsorted))
| 701 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''nat'''
_lowerCamelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _lowercase=4 , _lowercase=3 , _lowercase=6_4 , _lowercase=[3, 4, 6, 5] , _lowercase=[2, 4, 8, 1_6] , _lowercase=7 , _lowercase=3.0 , _lowercase=True , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.1 , _lowercase="gelu" , _lowercase=0.02 , _lowercase=1E-5 , _lowercase=0.0 , _lowercase=None , _lowercase=None , **_lowercase , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : Any = patch_size
snake_case_ : Optional[Any] = num_channels
snake_case_ : Optional[Any] = embed_dim
snake_case_ : Tuple = depths
snake_case_ : int = len(_lowercase )
snake_case_ : Optional[int] = num_heads
snake_case_ : List[str] = kernel_size
snake_case_ : str = mlp_ratio
snake_case_ : str = qkv_bias
snake_case_ : str = hidden_dropout_prob
snake_case_ : Tuple = attention_probs_dropout_prob
snake_case_ : Tuple = drop_path_rate
snake_case_ : Dict = hidden_act
snake_case_ : Union[str, Any] = layer_norm_eps
snake_case_ : Tuple = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case_ : Union[str, Any] = int(embed_dim * 2 ** (len(_lowercase ) - 1) )
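# e.g. with the defaults above: hidden_size = 64 * 2 ** (4 - 1) = 512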
snake_case_ : Union[str, Any] = layer_scale_init_value
snake_case_ : Optional[Any] = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(_lowercase ) + 1 )]
snake_case_ , snake_case_ : Any = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names )
| 21 | 0 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def __lowerCAmelCase ( __UpperCamelCase : float , __UpperCamelCase : float , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : int = x
snake_case_ : str = y
for step in range(__UpperCamelCase ): # noqa: B007
snake_case_ : List[str] = a * a - b * b + x
snake_case_ : Tuple = 2 * a * b + y
snake_case_ : List[Any] = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
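# Illustrative values (not part of the source file): with max_step = 50 the origin
# never diverges, so get_distance(0, 0, 50) returns 1.0, while the point (1, 1)
# diverges on the first step and get_distance(1, 1, 50) returns 0.0.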
def __lowerCAmelCase ( __UpperCamelCase : float ):
'''simple docstring'''
if distance == 1:
return (0, 0, 0)
else:
return (2_5_5, 2_5_5, 2_5_5)
def __lowerCAmelCase ( __UpperCamelCase : float ):
'''simple docstring'''
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 2_5_5 ) for i in colorsys.hsv_to_rgb(__UpperCamelCase , 1 , 1 ) )
def __lowerCAmelCase ( __UpperCamelCase : int = 8_0_0 , __UpperCamelCase : int = 6_0_0 , __UpperCamelCase : float = -0.6 , __UpperCamelCase : float = 0 , __UpperCamelCase : float = 3.2 , __UpperCamelCase : int = 5_0 , __UpperCamelCase : bool = True , ):
'''simple docstring'''
snake_case_ : Union[str, Any] = Image.new("""RGB""" , (image_width, image_height) )
snake_case_ : Tuple = img.load()
# loop through the image-coordinates
for image_x in range(__UpperCamelCase ):
for image_y in range(__UpperCamelCase ):
# determine the figure-coordinates based on the image-coordinates
snake_case_ : Dict = figure_width / image_width * image_height
snake_case_ : Any = figure_center_x + (image_x / image_width - 0.5) * figure_width
snake_case_ : Union[str, Any] = figure_center_y + (image_y / image_height - 0.5) * figure_height
snake_case_ : Tuple = get_distance(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
snake_case_ : str = get_color_coded_rgb(__UpperCamelCase )
else:
snake_case_ : List[Any] = get_black_and_white_rgb(__UpperCamelCase )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
__lowerCAmelCase : Tuple = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 702 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
__lowerCAmelCase : Optional[Any] = False
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
return 1_2
@property
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return 1_2
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return 3_2
@property
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : List[Any] = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(_lowercase )
@property
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = 1_2
snake_case_ : Tuple = 1_2
snake_case_ : Tuple = {
"""attention_bias""": True,
"""cross_attention_dim""": 3_2,
"""attention_head_dim""": height * width,
"""num_attention_heads""": 1,
"""num_vector_embeds""": self.num_embed,
"""num_embeds_ada_norm""": self.num_embeds_ada_norm,
"""norm_num_groups""": 3_2,
"""sample_size""": width,
"""activation_fn""": """geglu-approximate""",
}
snake_case_ : Optional[Any] = TransformeraDModel(**_lowercase )
return model
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = """cpu"""
snake_case_ : List[str] = self.dummy_vqvae
snake_case_ : Any = self.dummy_text_encoder
snake_case_ : Tuple = self.dummy_tokenizer
snake_case_ : int = self.dummy_transformer
snake_case_ : int = VQDiffusionScheduler(self.num_embed )
snake_case_ : Dict = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowercase )
snake_case_ : Optional[Any] = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
snake_case_ : int = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : List[Any] = """teddy bear playing in the pool"""
snake_case_ : Dict = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : List[Any] = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="""np""" )
snake_case_ : Optional[int] = output.images
snake_case_ : List[Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Dict = pipe(
[prompt] , generator=_lowercase , output_type="""np""" , return_dict=_lowercase , num_inference_steps=2 )[0]
snake_case_ : List[Any] = image[0, -3:, -3:, -1]
snake_case_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
snake_case_ : Dict = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : int = """cpu"""
snake_case_ : List[Any] = self.dummy_vqvae
snake_case_ : Optional[int] = self.dummy_text_encoder
snake_case_ : List[Any] = self.dummy_tokenizer
snake_case_ : Union[str, Any] = self.dummy_transformer
snake_case_ : str = VQDiffusionScheduler(self.num_embed )
snake_case_ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(
learnable=_lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
snake_case_ : Union[str, Any] = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
snake_case_ : Any = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : Tuple = """teddy bear playing in the pool"""
snake_case_ : str = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Tuple = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="""np""" )
snake_case_ : Dict = output.images
snake_case_ : Union[str, Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Any = pipe(
[prompt] , generator=_lowercase , output_type="""np""" , return_dict=_lowercase , num_inference_steps=2 )[0]
snake_case_ : Optional[Any] = image[0, -3:, -3:, -1]
snake_case_ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
snake_case_ : int = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" )
snake_case_ : str = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" )
snake_case_ : Optional[Any] = pipeline.to(_lowercase )
pipeline.set_progress_bar_config(disable=_lowercase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case_ : Any = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Optional[int] = pipeline(
"""teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=_lowercase , output_type="""np""" , )
snake_case_ : Union[str, Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 21 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase : Any = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Tuple = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 703 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Any=0 ):
'''simple docstring'''
if name is None:
snake_case_ : Dict = None
else:
snake_case_ : Dict = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(5_0 - spaces ) + """s}"""
snake_case_ : Any = fmt.format(__UpperCamelCase )
# Print and recurse (if needed).
if isinstance(__UpperCamelCase , __UpperCamelCase ):
if msg is not None:
print(__UpperCamelCase )
for k in val.keys():
recursive_print(__UpperCamelCase , val[k] , spaces + 2 )
elif isinstance(__UpperCamelCase , torch.Tensor ):
print(__UpperCamelCase , """:""" , val.size() )
else:
print(__UpperCamelCase , """:""" , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : Any = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
snake_case_ : List[str] = (num_heads, hidden_size, num_splits) + input_shape[1:]
snake_case_ : Tuple = param.view(*__UpperCamelCase )
snake_case_ : Tuple = param.transpose(0 , 2 )
snake_case_ : Any = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
snake_case_ : Optional[Any] = (num_heads, num_splits, hidden_size) + input_shape[1:]
snake_case_ : str = param.view(*__UpperCamelCase )
snake_case_ : Dict = param.transpose(0 , 1 ).contiguous()
snake_case_ : int = param.view(*__UpperCamelCase )
return param
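# Shape sketch (illustrative, not part of the source file): for checkpoint_version
# >= 2.0 a fused QKV tensor saved as [num_heads * num_splits * hidden_size, :] is
# viewed as [num_heads, num_splits, hidden_size, :], its first two axes are swapped,
# and the result is flattened back, producing the splits-major
# [num_splits * num_heads * hidden_size, :] layout that GPT-2 attention expects.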
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Dict = {}
# old versions did not store training args
snake_case_ : List[str] = input_state_dict.get("""args""" , __UpperCamelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
snake_case_ : Tuple = ds_args.padded_vocab_size
snake_case_ : Optional[int] = ds_args.max_position_embeddings
snake_case_ : Union[str, Any] = ds_args.hidden_size
snake_case_ : Union[str, Any] = ds_args.num_layers
snake_case_ : str = ds_args.num_attention_heads
snake_case_ : str = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
snake_case_ : Union[str, Any] = config.n_head
# The hidden_size per head.
snake_case_ : Optional[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
snake_case_ : Optional[Any] = input_state_dict["""checkpoint_version"""]
else:
snake_case_ : int = 0.0
# The model.
snake_case_ : List[str] = input_state_dict["""model"""]
# The language model.
snake_case_ : str = model["""language_model"""]
# The embeddings.
snake_case_ : Tuple = lm["""embedding"""]
# The word embeddings.
snake_case_ : List[str] = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
snake_case_ : Optional[int] = word_embeddings[: config.vocab_size, :]
snake_case_ : Optional[int] = word_embeddings
# The position embeddings.
snake_case_ : List[Any] = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
snake_case_ : Tuple = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match' )
# Store the position embeddings.
snake_case_ : Union[str, Any] = pos_embeddings
# The transformer.
snake_case_ : Optional[Any] = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
snake_case_ : Optional[Any] = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
snake_case_ : List[str] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
snake_case_ : int = layer_re.match(__UpperCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
snake_case_ : Tuple = int(m.group(1 ) )
# The name of the operation.
snake_case_ : Any = m.group(2 )
# Is it a weight or a bias?
snake_case_ : Union[str, Any] = m.group(3 )
# The name of the layer.
snake_case_ : str = F'transformer.h.{layer_idx}'
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
snake_case_ : Dict = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
snake_case_ : Optional[int] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
snake_case_ : Optional[Any] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , __UpperCamelCase , __UpperCamelCase )
snake_case_ : List[Any] = causal_mask
# Insert a "dummy" tensor for masked_bias.
snake_case_ : str = torch.tensor(-1E4 , dtype=torch.floataa )
snake_case_ : List[Any] = masked_bias
snake_case_ : Optional[int] = fix_query_key_value_ordering(__UpperCamelCase , __UpperCamelCase , 3 , __UpperCamelCase , __UpperCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
snake_case_ : str = out_val.transpose(0 , 1 ).contiguous()
# Store.
snake_case_ : Tuple = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
snake_case_ : Optional[Any] = fix_query_key_value_ordering(__UpperCamelCase , __UpperCamelCase , 3 , __UpperCamelCase , __UpperCamelCase )
# Store. No change of shape.
snake_case_ : List[Any] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
snake_case_ : Any = megatron_to_transformers[op_name]
snake_case_ : str = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
snake_case_ : List[str] = megatron_to_transformers[op_name]
snake_case_ : Tuple = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
snake_case_ : Dict = transformer["""final_layernorm.weight"""]
snake_case_ : Dict = transformer["""final_layernorm.bias"""]
# For the LM head, transformers expects the matrix tied to the word embeddings.
snake_case_ : Optional[int] = word_embeddings
# It should be done!
return output_state_dict
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[str] = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" , type=__UpperCamelCase , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
parser.add_argument(
"""--config_file""" , default="""""" , type=__UpperCamelCase , help="""An optional config json file describing the pre-trained model.""" , )
snake_case_ : str = parser.parse_args()
# Extract the basename.
snake_case_ : Optional[Any] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip wrapper is optional; keep it for backward compatibility
print(F'Extracting PyTorch state dictionary from {args.path_to_checkpoint}' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
snake_case_ : Optional[int] = torch.load(__UpperCamelCase , map_location="""cpu""" )
else:
snake_case_ : List[Any] = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
snake_case_ : Any = input_state_dict.get("""args""" , __UpperCamelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
snake_case_ : Any = """gelu_fast"""
elif ds_args.openai_gelu:
snake_case_ : Tuple = """gelu_new"""
else:
snake_case_ : List[str] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
snake_case_ : Dict = """gelu_new"""
# Spell out all parameters in case the defaults change.
snake_case_ : List[str] = GPTaConfig(
vocab_size=5_0_2_5_7 , n_positions=1_0_2_4 , n_embd=1_0_2_4 , n_layer=2_4 , n_head=1_6 , n_inner=4_0_9_6 , activation_function=__UpperCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=__UpperCamelCase , summary_activation=__UpperCamelCase , summary_proj_to_labels=__UpperCamelCase , summary_first_dropout=0.1 , scale_attn_weights=__UpperCamelCase , use_cache=__UpperCamelCase , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , )
else:
snake_case_ : List[Any] = GPTaConfig.from_json_file(args.config_file )
snake_case_ : int = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
snake_case_ : Tuple = convert_megatron_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(__UpperCamelCase , __UpperCamelCase )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906
if ds_args is not None:
snake_case_ : str = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
snake_case_ : Optional[Any] = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
snake_case_ : str = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'Unrecognized tokenizer_type {tokenizer_type}' )
else:
snake_case_ : List[str] = """gpt2"""
snake_case_ : List[Any] = AutoTokenizer.from_pretrained(__UpperCamelCase )
snake_case_ : List[str] = type(__UpperCamelCase ).__name__
snake_case_ : Optional[int] = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(__UpperCamelCase )
# Save tokenizer based on args
print(F'Adding {tokenizer_class} tokenizer files' )
tokenizer.save_pretrained(__UpperCamelCase )
# Store the state_dict to file.
snake_case_ : List[Any] = os.path.join(__UpperCamelCase , """pytorch_model.bin""" )
print(F'Saving checkpoint to "{output_checkpoint_file}"' )
torch.save(__UpperCamelCase , __UpperCamelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
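# Example invocation (illustrative, not part of the source file):
#   PYTHONPATH=/path/to/Megatron-LM python convert_megatron_gpt2_checkpoint.py \
#       --print-checkpoint-structure /path/to/model_optim_rng.zip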
| 21 | 0 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Dict = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = {
'''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''',
'''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''',
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''encodec'''
def __init__( self , _lowercase=[1.5, 3.0, 6.0, 12.0, 24.0] , _lowercase=2_4_0_0_0 , _lowercase=1 , _lowercase=False , _lowercase=None , _lowercase=None , _lowercase=1_2_8 , _lowercase=3_2 , _lowercase=1 , _lowercase=[8, 5, 4, 2] , _lowercase="weight_norm" , _lowercase=7 , _lowercase=7 , _lowercase=3 , _lowercase=2 , _lowercase=True , _lowercase="reflect" , _lowercase=2 , _lowercase=2 , _lowercase=1.0 , _lowercase=1_0_2_4 , _lowercase=None , _lowercase=True , **_lowercase , ) -> Any:
'''simple docstring'''
snake_case_ : int = target_bandwidths
snake_case_ : Optional[Any] = sampling_rate
snake_case_ : List[str] = audio_channels
snake_case_ : Union[str, Any] = normalize
snake_case_ : List[str] = chunk_length_s
snake_case_ : Union[str, Any] = overlap
snake_case_ : Dict = hidden_size
snake_case_ : Union[str, Any] = num_filters
snake_case_ : Dict = num_residual_layers
snake_case_ : Dict = upsampling_ratios
snake_case_ : Optional[int] = norm_type
snake_case_ : Optional[Any] = kernel_size
snake_case_ : Dict = last_kernel_size
snake_case_ : Tuple = residual_kernel_size
snake_case_ : Tuple = dilation_growth_rate
snake_case_ : List[Any] = use_causal_conv
snake_case_ : Optional[int] = pad_mode
snake_case_ : int = compress
snake_case_ : int = num_lstm_layers
snake_case_ : Optional[int] = trim_right_ratio
snake_case_ : List[Any] = codebook_size
snake_case_ : Dict = codebook_dim if codebook_dim is not None else hidden_size
snake_case_ : Union[str, Any] = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}' )
super().__init__(**_lowercase )
@property
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return int(1_0_0_0 * self.target_bandwidths[-1] // (self.frame_rate * 1_0) )
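# Worked example with the defaults above (illustrative, not part of the source file):
#   frame_rate     = ceil(24000 / (8 * 5 * 4 * 2)) = ceil(24000 / 320) = 75
#   num_quantizers = int(1000 * 24.0 // (75 * 10)) = 32
#   chunk_length and chunk_stride are None because chunk_length_s defaults to None.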
| 704 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : int = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : int = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : int = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : List[Any] = tf.cast(math.pi , x.dtype )
snake_case_ : int = tf.cast(0.044_715 , x.dtype )
snake_case_ : Optional[int] = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(__UpperCamelCase , 3 )) ))
return x * cdf
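# i.e. the tanh approximation of GELU from Hendrycks & Gimpel (2016):
# 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x**3)))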
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : Optional[Any] = tf.convert_to_tensor(__UpperCamelCase )
return x * tf.tanh(tf.math.softplus(__UpperCamelCase ) )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : int = tf.cast(0.044_715 , x.dtype )
snake_case_ : Optional[int] = tf.cast(0.7_978_845_608 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : Tuple = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : str = tf.cast(1.702 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return tf.clip_by_value(_gelu(__UpperCamelCase ) , -1_0 , 1_0 )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str]=-1 ):
'''simple docstring'''
snake_case_ , snake_case_ : List[Any] = tf.split(__UpperCamelCase , 2 , axis=__UpperCamelCase )
return a * tf.math.sigmoid(__UpperCamelCase )
if version.parse(tf.version.VERSION) >= version.parse('''2.4'''):
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
return tf.keras.activations.gelu(__UpperCamelCase , approximate=__UpperCamelCase )
__lowerCAmelCase : int = tf.keras.activations.gelu
__lowerCAmelCase : Optional[Any] = approximate_gelu_wrap
else:
__lowerCAmelCase : List[Any] = _gelu
__lowerCAmelCase : Any = _gelu_new
__lowerCAmelCase : Dict = {
'''gelu''': gelu,
'''gelu_10''': gelu_aa,
'''gelu_fast''': gelu_fast,
'''gelu_new''': gelu_new,
'''glu''': glu,
'''mish''': mish,
'''quick_gelu''': quick_gelu,
'''relu''': tf.keras.activations.relu,
'''sigmoid''': tf.keras.activations.sigmoid,
'''silu''': tf.keras.activations.swish,
'''swish''': tf.keras.activations.swish,
'''tanh''': tf.keras.activations.tanh,
}
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F'function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}' )
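# Illustrative usage (assuming the original name get_tf_activation; not part of the
# source file):
#   act = get_tf_activation("gelu")          # returns the callable from the mapping
#   y = act(tf.constant([-1.0, 0.0, 1.0]))   # elementwise activation
#   get_tf_activation("does_not_exist")      # raises KeyError listing valid names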
| 21 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : List[Any] = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''levit'''
def __init__( self , _lowercase=2_2_4 , _lowercase=3 , _lowercase=3 , _lowercase=2 , _lowercase=1 , _lowercase=1_6 , _lowercase=[1_2_8, 2_5_6, 3_8_4] , _lowercase=[4, 8, 1_2] , _lowercase=[4, 4, 4] , _lowercase=[1_6, 1_6, 1_6] , _lowercase=0 , _lowercase=[2, 2, 2] , _lowercase=[2, 2, 2] , _lowercase=0.02 , **_lowercase , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : Union[str, Any] = image_size
snake_case_ : List[str] = num_channels
snake_case_ : Optional[Any] = kernel_size
snake_case_ : str = stride
snake_case_ : Any = padding
snake_case_ : Dict = hidden_sizes
snake_case_ : Optional[Any] = num_attention_heads
snake_case_ : List[str] = depths
snake_case_ : List[Any] = key_dim
snake_case_ : List[Any] = drop_path_rate
snake_case_ : List[Any] = patch_size
snake_case_ : int = attention_ratio
snake_case_ : int = mlp_ratio
snake_case_ : int = initializer_range
snake_case_ : Tuple = [
["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = version.parse('''1.11''' )
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCAmelCase__ ( self ) -> float:
'''simple docstring'''
return 1E-4
| 705 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : int = [0] * len(__UpperCamelCase )
snake_case_ : List[str] = []
snake_case_ : Any = [1] * len(__UpperCamelCase )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__UpperCamelCase ) ):
if indegree[i] == 0:
queue.append(__UpperCamelCase )
while queue:
snake_case_ : Optional[int] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
snake_case_ : Union[str, Any] = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(__UpperCamelCase )
print(max(__UpperCamelCase ) )
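# Illustrative note (not in the source): for the sample DAG below, the longest path
# counted in vertices is 0 -> 2 -> 5 -> 6 -> 7, so the script prints 5.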
# Adjacency list of Graph
__lowerCAmelCase : str = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 21 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = ort.SessionOptions()
snake_case_ : List[str] = False
return options
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
snake_case_ : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
snake_case_ : Union[str, Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : Optional[int] = """A red cat sitting on a park bench"""
snake_case_ : Optional[Any] = np.random.RandomState(0 )
snake_case_ : Tuple = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=1_0 , generator=_lowercase , output_type="""np""" , )
snake_case_ : str = output.images
snake_case_ : Any = images[0, 2_5_5:2_5_8, 2_5_5:2_5_8, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
snake_case_ : Any = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
snake_case_ : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
snake_case_ : Dict = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" )
snake_case_ : str = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : List[Any] = """A red cat sitting on a park bench"""
snake_case_ : List[Any] = np.random.RandomState(0 )
snake_case_ : List[Any] = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=2_0 , generator=_lowercase , output_type="""np""" , )
snake_case_ : Tuple = output.images
snake_case_ : int = images[0, 2_5_5:2_5_8, 2_5_5:2_5_8, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
snake_case_ : Optional[int] = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 706 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Any = 1 # To keep the calculated value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
snake_case_ : Optional[int] = n - k
# Calculate C(n,k)
for i in range(__UpperCamelCase ):
result *= n - i
result //= i + 1
return result
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return binomial_coefficient(2 * node_count , __UpperCamelCase ) // (node_count + 1)
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
if n < 0:
raise ValueError("""factorial() not defined for negative values""" )
snake_case_ : Optional[int] = 1
for i in range(1 , n + 1 ):
result *= i
return result
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return catalan_number(__UpperCamelCase ) * factorial(__UpperCamelCase )
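# Worked example (illustrative): for node_count = 3, binomial_coefficient(6, 3) = 20,
# so catalan_number(3) = 20 // 4 = 5 distinct binary search trees, and
# binary_tree_count(3) = 5 * 3! = 30 distinct (labeled) binary trees.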
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] = int(input('''Enter the number of nodes: ''').strip() or 0)
if node_count <= 0:
raise ValueError('''We need some nodes to work with.''')
print(
F'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
F'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
| 21 | 0 |
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__lowerCAmelCase : List[str] = '''
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
author = {Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415},
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__lowerCAmelCase : Optional[int] = '''\
WIKI_SPLIT is the combination of three metrics: SARI, EXACT and SACREBLEU.
It can be used to evaluate the quality of machine-generated texts.
'''
__lowerCAmelCase : Dict = '''
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
def remove_articles(__UpperCamelCase : str ):
snake_case_ : Tuple = re.compile(r"""\b(a|an|the)\b""" , re.UNICODE )
return re.sub(__UpperCamelCase , """ """ , __UpperCamelCase )
def white_space_fix(__UpperCamelCase : Optional[Any] ):
return " ".join(text.split() )
def remove_punc(__UpperCamelCase : Any ):
snake_case_ : Optional[int] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__UpperCamelCase : Dict ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__UpperCamelCase ) ) ) )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
return int(normalize_answer(__UpperCamelCase ) == normalize_answer(__UpperCamelCase ) )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = [any(compute_exact(__UpperCamelCase , __UpperCamelCase ) for ref in refs ) for pred, refs in zip(__UpperCamelCase , __UpperCamelCase )]
return (sum(__UpperCamelCase ) / len(__UpperCamelCase )) * 1_0_0
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Dict = [rgram for rgrams in rgramslist for rgram in rgrams]
snake_case_ : List[str] = Counter(__UpperCamelCase )
snake_case_ : str = Counter(__UpperCamelCase )
snake_case_ : str = Counter()
for sgram, scount in sgramcounter.items():
snake_case_ : Optional[int] = scount * numref
snake_case_ : int = Counter(__UpperCamelCase )
snake_case_ : Tuple = Counter()
for cgram, ccount in cgramcounter.items():
snake_case_ : str = ccount * numref
# KEEP
snake_case_ : Optional[Any] = sgramcounter_rep & cgramcounter_rep
snake_case_ : Dict = keepgramcounter_rep & rgramcounter
snake_case_ : Optional[Any] = sgramcounter_rep & rgramcounter
snake_case_ : Optional[int] = 0
snake_case_ : Tuple = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
snake_case_ : Optional[int] = 1
snake_case_ : Tuple = 1
if len(__UpperCamelCase ) > 0:
snake_case_ : int = keeptmpscorea / len(__UpperCamelCase )
if len(__UpperCamelCase ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
snake_case_ : List[str] = keeptmpscorea / sum(keepgramcounterall_rep.values() )
snake_case_ : Tuple = 0
if keepscore_precision > 0 or keepscore_recall > 0:
snake_case_ : Union[str, Any] = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
snake_case_ : Union[str, Any] = sgramcounter_rep - cgramcounter_rep
snake_case_ : Optional[Any] = delgramcounter_rep - rgramcounter
snake_case_ : Tuple = sgramcounter_rep - rgramcounter
snake_case_ : Optional[Any] = 0
snake_case_ : Dict = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
snake_case_ : str = 1
if len(__UpperCamelCase ) > 0:
snake_case_ : Tuple = deltmpscorea / len(__UpperCamelCase )
# ADDITION
snake_case_ : Any = set(__UpperCamelCase ) - set(__UpperCamelCase )
snake_case_ : Tuple = set(__UpperCamelCase ) & set(__UpperCamelCase )
snake_case_ : Union[str, Any] = set(__UpperCamelCase ) - set(__UpperCamelCase )
snake_case_ : int = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
snake_case_ : Tuple = 1
snake_case_ : Optional[Any] = 1
if len(__UpperCamelCase ) > 0:
snake_case_ : Dict = addtmpscore / len(__UpperCamelCase )
if len(__UpperCamelCase ) > 0:
snake_case_ : Tuple = addtmpscore / len(__UpperCamelCase )
snake_case_ : int = 0
if addscore_precision > 0 or addscore_recall > 0:
snake_case_ : Optional[int] = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : List[str] = len(__UpperCamelCase )
snake_case_ : Union[str, Any] = ssent.split(""" """ )
snake_case_ : Optional[Any] = csent.split(""" """ )
snake_case_ : int = []
snake_case_ : int = []
snake_case_ : Optional[int] = []
snake_case_ : List[str] = []
snake_case_ : str = []
snake_case_ : Union[str, Any] = []
snake_case_ : Dict = []
snake_case_ : List[str] = []
snake_case_ : int = []
snake_case_ : Dict = []
for rsent in rsents:
snake_case_ : Any = rsent.split(""" """ )
snake_case_ : List[str] = []
snake_case_ : Tuple = []
snake_case_ : Optional[Any] = []
ragramslist.append(__UpperCamelCase )
for i in range(0 , len(__UpperCamelCase ) - 1 ):
if i < len(__UpperCamelCase ) - 1:
snake_case_ : Optional[int] = ragrams[i] + """ """ + ragrams[i + 1]
ragrams.append(__UpperCamelCase )
if i < len(__UpperCamelCase ) - 2:
snake_case_ : Union[str, Any] = ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2]
ragrams.append(__UpperCamelCase )
if i < len(__UpperCamelCase ) - 3:
snake_case_ : Union[str, Any] = ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2] + """ """ + ragrams[i + 3]
ragrams.append(__UpperCamelCase )
ragramslist.append(__UpperCamelCase )
ragramslist.append(__UpperCamelCase )
ragramslist.append(__UpperCamelCase )
for i in range(0 , len(__UpperCamelCase ) - 1 ):
if i < len(__UpperCamelCase ) - 1:
snake_case_ : Optional[Any] = sagrams[i] + """ """ + sagrams[i + 1]
sagrams.append(__UpperCamelCase )
if i < len(__UpperCamelCase ) - 2:
snake_case_ : List[str] = sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2]
sagrams.append(__UpperCamelCase )
if i < len(__UpperCamelCase ) - 3:
snake_case_ : Optional[Any] = sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2] + """ """ + sagrams[i + 3]
sagrams.append(__UpperCamelCase )
for i in range(0 , len(__UpperCamelCase ) - 1 ):
if i < len(__UpperCamelCase ) - 1:
snake_case_ : Union[str, Any] = cagrams[i] + """ """ + cagrams[i + 1]
cagrams.append(__UpperCamelCase )
if i < len(__UpperCamelCase ) - 2:
snake_case_ : Any = cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2]
cagrams.append(__UpperCamelCase )
if i < len(__UpperCamelCase ) - 3:
snake_case_ : Optional[int] = cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2] + """ """ + cagrams[i + 3]
cagrams.append(__UpperCamelCase )
snake_case_ , snake_case_ , snake_case_ : Tuple = SARIngram(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ , snake_case_ , snake_case_ : Any = SARIngram(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ , snake_case_ , snake_case_ : Any = SARIngram(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ , snake_case_ , snake_case_ : str = SARIngram(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ : Dict = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
snake_case_ : Tuple = sum([delascore, delascore, delascore, delascore] ) / 4
snake_case_ : Any = sum([addascore, addascore, addascore, addascore] ) / 4
snake_case_ : Tuple = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
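# Illustrative note (not in the source): SARI averages the keep / delete / add
# sub-scores over 1- to 4-grams and then over the three operations (delete
# contributes only its precision), so finalscore lies in [0, 1] before the caller
# scales it by 100.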
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : bool = True , __UpperCamelCase : str = "13a" , __UpperCamelCase : bool = True ):
'''simple docstring'''
if lowercase:
snake_case_ : Optional[int] = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
snake_case_ : str = sacrebleu.metrics.bleu._get_tokenizer(__UpperCamelCase )()(__UpperCamelCase )
else:
snake_case_ : Optional[int] = sacrebleu.TOKENIZERS[tokenizer]()(__UpperCamelCase )
elif tokenizer == "moses":
snake_case_ : int = sacremoses.MosesTokenizer().tokenize(__UpperCamelCase , return_str=__UpperCamelCase , escape=__UpperCamelCase )
elif tokenizer == "penn":
snake_case_ : Tuple = sacremoses.MosesTokenizer().penn_tokenize(__UpperCamelCase , return_str=__UpperCamelCase )
else:
snake_case_ : str = sentence
if not return_str:
snake_case_ : List[Any] = normalized_sent.split()
return normalized_sent
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : Tuple ):
'''simple docstring'''
if not (len(__UpperCamelCase ) == len(__UpperCamelCase ) == len(__UpperCamelCase )):
raise ValueError("""Sources length must match predictions and references lengths.""" )
snake_case_ : Dict = 0
for src, pred, refs in zip(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
sari_score += SARIsent(normalize(__UpperCamelCase ) , normalize(__UpperCamelCase ) , [normalize(__UpperCamelCase ) for sent in refs] )
snake_case_ : Dict = sari_score / len(__UpperCamelCase )
return 1_0_0 * sari_score
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any]="exp" , __UpperCamelCase : Dict=None , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : str=False , __UpperCamelCase : int=False , ):
'''simple docstring'''
snake_case_ : str = len(references[0] )
if any(len(__UpperCamelCase ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
snake_case_ : List[str] = [[refs[i] for refs in references] for i in range(__UpperCamelCase )]
snake_case_ : List[Any] = sacrebleu.corpus_bleu(
__UpperCamelCase , __UpperCamelCase , smooth_method=__UpperCamelCase , smooth_value=__UpperCamelCase , force=__UpperCamelCase , lowercase=__UpperCamelCase , use_effective_order=__UpperCamelCase , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=[
"""https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py""",
"""https://github.com/cocoxu/simplification/blob/master/SARI.py""",
"""https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py""",
"""https://github.com/mjpost/sacreBLEU""",
] , reference_urls=[
"""https://www.aclweb.org/anthology/Q16-1029.pdf""",
"""https://github.com/mjpost/sacreBLEU""",
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
    def UpperCAmelCase__ ( self , sources , predictions , references ) -> List[str]:
        '''simple docstring'''
        result = {}
        result.update({"""sari""": compute_sari(sources=sources , predictions=predictions , references=references )} )
        result.update({"""sacrebleu""": compute_sacrebleu(predictions=predictions , references=references )} )
        result.update({"""exact""": compute_em(predictions=predictions , references=references )} )
        return result
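# Minimal usage sketch of the helpers above (not part of the original file).
# It assumes SARIsent and compute_em are defined earlier in this module and that
# sacrebleu/sacremoses are installed; the sentences below are illustrative only.
if __name__ == "__main__":
    sources = ["About 95 species are currently accepted ."]
    predictions = ["About 95 you now get in ."]
    references = [["About 95 species are currently known ."]]
    print("sari:", compute_sari(sources=sources, predictions=predictions, references=references))
    print("sacrebleu:", compute_sacrebleu(predictions=predictions, references=references))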
| 707 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : Dict = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''swin'''
_lowerCamelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
    def __init__( self , image_size=2_2_4 , patch_size=4 , num_channels=3 , embed_dim=9_6 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 1_2, 2_4] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="""gelu""" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1E-5 , encoder_stride=3_2 , out_features=None , out_indices=None , **kwargs , ) -> Union[str, Any]:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.stage_names = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(depths ) + 1 )]
        self.out_features , self.out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = version.parse('''1.11''' )
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCAmelCase__ ( self ) -> float:
'''simple docstring'''
return 1E-4
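# Hedged usage sketch (not in the original file). Under the upstream class name
# SwinConfig from transformers, the derived attributes behave like this:
#
#     from transformers import SwinConfig
#     cfg = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
#     cfg.hidden_size   # 96 * 2 ** 3 == 768
#     cfg.stage_names   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']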
| 21 | 0 |
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ''''''
_lowerCamelCase = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_lowerCamelCase = None # compression type in fsspec. ex: "gzip"
_lowerCamelCase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self , _lowercase = "" , _lowercase = None , _lowercase = None , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(self , **_lowercase )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
snake_case_ : Dict = fsspec.open(
_lowercase , mode="""rb""" , protocol=_lowercase , compression=self.compression , client_kwargs={
"""requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
"""trust_env""": True, # Enable reading proxy env variables.
**(target_options or {}).pop("""client_kwargs""" , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
snake_case_ : List[str] = os.path.basename(self.file.path.split("""::""" )[0] )
snake_case_ : str = (
self.compressed_name[: self.compressed_name.rindex(""".""" )]
if """.""" in self.compressed_name
else self.compressed_name
)
snake_case_ : Any = None
@classmethod
def UpperCAmelCase__ ( cls , _lowercase ) -> List[Any]:
'''simple docstring'''
return super()._strip_protocol(_lowercase ).lstrip("""/""" )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name}
            self.dir_cache = {f["""name"""]: f}
def UpperCAmelCase__ ( self , _lowercase ) -> Tuple:
'''simple docstring'''
return self.file.open().read()
def UpperCAmelCase__ ( self , _lowercase , _lowercase = "rb" , _lowercase=None , _lowercase=True , _lowercase=None , **_lowercase , ) -> Any:
'''simple docstring'''
snake_case_ : str = self._strip_protocol(_lowercase )
if mode != "rb":
raise ValueError(f'Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'' )
return self.file.open()
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''bz2'''
_lowerCamelCase = '''bz2'''
_lowerCamelCase = '''.bz2'''
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''gzip'''
_lowerCamelCase = '''gzip'''
_lowerCamelCase = '''.gz'''
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''lz4'''
_lowerCamelCase = '''lz4'''
_lowerCamelCase = '''.lz4'''
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''xz'''
_lowerCamelCase = '''xz'''
_lowerCamelCase = '''.xz'''
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''zstd'''
_lowerCamelCase = '''zstd'''
_lowerCamelCase = '''.zst'''
def __init__( self , _lowercase , _lowercase = "rb" , _lowercase = None , _lowercase = None , _lowercase = DEFAULT_BLOCK_SIZE , **_lowercase , ) -> str:
'''simple docstring'''
super().__init__(
fo=_lowercase , mode=_lowercase , target_protocol=_lowercase , target_options=_lowercase , block_size=_lowercase , **_lowercase , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__  # keep the original __enter__ so fixed_enter below can wrap it
        class WrappedFile:  # wraps the raw reader so it behaves as a context manager and iterator
"""simple docstring"""
            def __init__( self , file_ ) -> Optional[int]:
                '''simple docstring'''
                self._file = file_
def __enter__( self ) -> Optional[int]:
'''simple docstring'''
self._file.__enter__()
return self
            def __exit__( self , *args , **kwargs ) -> List[Any]:
                '''simple docstring'''
                self._file.__exit__(*args , **kwargs )
def __iter__( self ) -> Dict:
'''simple docstring'''
return iter(self._file )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
return next(self._file )
def __getattr__( self , _lowercase ) -> Any:
'''simple docstring'''
return getattr(self._file , _lowercase )
        def fixed_enter(*args , **kwargs ):
            return WrappedFile(_enter(*args , **kwargs ) )
        self.file.__enter__ = fixed_enter  # monkey-patch __enter__ to hand back the wrapper
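# Hedged usage sketch (not part of the original module): once these classes are
# registered as fsspec filesystems, a compressed file can be read through the
# matching protocol prefix. The path below is illustrative only.
#
#     import fsspec
#     with fsspec.open("gzip://file.txt::./file.txt.gz", "rb") as f:
#         data = f.read()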
| 708 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : int = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" )
snake_case_ : List[Any] = AutoTokenizer.from_pretrained("""google/mt5-small""" )
snake_case_ : Dict = tokenizer("""Hello there""" , return_tensors="""np""" ).input_ids
snake_case_ : List[str] = tokenizer("""Hi I am""" , return_tensors="""np""" ).input_ids
snake_case_ : Optional[Any] = shift_tokens_right(_lowercase , model.config.pad_token_id , model.config.decoder_start_token_id )
snake_case_ : Tuple = model(_lowercase , decoder_input_ids=_lowercase ).logits
snake_case_ : Tuple = optax.softmax_cross_entropy(_lowercase , onehot(_lowercase , logits.shape[-1] ) ).mean()
snake_case_ : List[str] = -(labels.shape[-1] * loss.item())
snake_case_ : Optional[int] = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 21 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_conditional_detr''': [
'''CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ConditionalDetrConfig''',
'''ConditionalDetrOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_conditional_detr"""] = ["""ConditionalDetrFeatureExtractor"""]
    _import_structure["""image_processing_conditional_detr"""] = ["""ConditionalDetrImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_conditional_detr"""] = [
        """CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ConditionalDetrForObjectDetection""",
        """ConditionalDetrForSegmentation""",
        """ConditionalDetrModel""",
        """ConditionalDetrPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 709 |
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 21 | 0 |
"""simple docstring"""
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
def get_mobilenet_va_config( model_name ):
    '''simple docstring'''
    config = MobileNetVaConfig(layer_norm_eps=0.001 )
    if "_quant" in model_name:
        raise ValueError("""Quantized models are not supported.""" )
    matches = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""" , model_name )
    if matches:
        config.depth_multiplier = float(matches[1] )
        config.image_size = int(matches[2] )
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1_0_0_1
    filename = """imagenet-1k-id2label.json"""
    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    idalabel = {int(k ) + 1: v for k, v in idalabel.items()}
    idalabel[0] = """background"""
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
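# Quick illustration of the label shift above (not from the original script):
# TensorFlow index i becomes index i + 1, and index 0 is reserved for "background".
#
#     tf_labels = {0: "tench", 1: "goldfish"}
#     hf_labels = {k + 1: v for k, v in tf_labels.items()}
#     hf_labels[0] = "background"   # {0: 'background', 1: 'tench', 2: 'goldfish'}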
def prepare_img( ):
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
    '''simple docstring'''
    config = get_mobilenet_va_config(model_name )
    # Load 🤗 model
    model = MobileNetVaForImageClassification(config ).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model , config , checkpoint_path )
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 3_2} , )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    outputs = model(**encoding )
    logits = outputs.logits
    assert logits.shape == (1, 1_0_0_1)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1_739, -1.1_233, 3.1_205] )
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9_440, -2.3_141, -0.3_333] )
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("""Pushing to the hub...""" )
        repo_id = """google/""" + model_name
        image_processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
__lowerCAmelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__lowerCAmelCase : List[Any] = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 710 |
"""simple docstring"""
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__lowerCAmelCase : List[str] = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
__lowerCAmelCase : Optional[Any] = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
__lowerCAmelCase : str = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy( preds , labels ):
    '''simple docstring'''
    return float((preds == labels).mean() )
def acc_and_fa( preds , labels , fa_avg="binary" ):
    '''simple docstring'''
    acc = simple_accuracy(preds , labels )
    fa = float(fa_score(y_true=labels , y_pred=preds , average=fa_avg ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def evaluate_multirc( ids_preds , labels ):
    '''simple docstring'''
    question_map = {}
    for id_pred, label in zip(ids_preds , labels ):
        question_id = F'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["""prediction"""]
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]
    fas , ems = [], []
    for question, preds_labels in question_map.items():
        question_preds , question_labels = zip(*preds_labels )
        fa = fa_score(y_true=question_labels , y_pred=question_preds , average="""macro""" )
        fas.append(fa )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    fa_m = float(sum(fas ) / len(fas ) )
    em = sum(ems ) / len(ems )
    fa_a = float(fa_score(y_true=labels , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
    def _get_feature_types( self ) -> Optional[int]:
'''simple docstring'''
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
    def UpperCAmelCase__ ( self , predictions , references ) -> List[str]:
        '''simple docstring'''
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "cb":
            return acc_and_fa(predictions , references , fa_avg="""macro""" )
        elif self.config_name == "record":
            dataset = [
                {
                    """qas""": [
                        {"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
            return evaluate_record(dataset , predictions )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions , references )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions , references )}
        else:
            raise KeyError(
                """You should supply a configuration name selected in """
                """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 21 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
__lowerCAmelCase : Optional[Any] = False
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
return 1_2
@property
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return 1_2
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return 3_2
@property
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : List[Any] = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(_lowercase )
@property
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = 1_2
snake_case_ : Tuple = 1_2
snake_case_ : Tuple = {
"""attention_bias""": True,
"""cross_attention_dim""": 3_2,
"""attention_head_dim""": height * width,
"""num_attention_heads""": 1,
"""num_vector_embeds""": self.num_embed,
"""num_embeds_ada_norm""": self.num_embeds_ada_norm,
"""norm_num_groups""": 3_2,
"""sample_size""": width,
"""activation_fn""": """geglu-approximate""",
}
snake_case_ : Optional[Any] = TransformeraDModel(**_lowercase )
return model
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = """cpu"""
snake_case_ : List[str] = self.dummy_vqvae
snake_case_ : Any = self.dummy_text_encoder
snake_case_ : Tuple = self.dummy_tokenizer
snake_case_ : int = self.dummy_transformer
snake_case_ : int = VQDiffusionScheduler(self.num_embed )
snake_case_ : Dict = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowercase )
snake_case_ : Optional[Any] = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
snake_case_ : int = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : List[Any] = """teddy bear playing in the pool"""
snake_case_ : Dict = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : List[Any] = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="""np""" )
snake_case_ : Optional[int] = output.images
snake_case_ : List[Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Dict = pipe(
[prompt] , generator=_lowercase , output_type="""np""" , return_dict=_lowercase , num_inference_steps=2 )[0]
snake_case_ : List[Any] = image[0, -3:, -3:, -1]
snake_case_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
snake_case_ : Dict = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : int = """cpu"""
snake_case_ : List[Any] = self.dummy_vqvae
snake_case_ : Optional[int] = self.dummy_text_encoder
snake_case_ : List[Any] = self.dummy_tokenizer
snake_case_ : Union[str, Any] = self.dummy_transformer
snake_case_ : str = VQDiffusionScheduler(self.num_embed )
snake_case_ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(
learnable=_lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
snake_case_ : Union[str, Any] = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
snake_case_ : Any = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : Tuple = """teddy bear playing in the pool"""
snake_case_ : str = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Tuple = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="""np""" )
snake_case_ : Dict = output.images
snake_case_ : Union[str, Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Any = pipe(
[prompt] , generator=_lowercase , output_type="""np""" , return_dict=_lowercase , num_inference_steps=2 )[0]
snake_case_ : Optional[Any] = image[0, -3:, -3:, -1]
snake_case_ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
snake_case_ : int = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" )
snake_case_ : str = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" )
snake_case_ : Optional[Any] = pipeline.to(_lowercase )
pipeline.set_progress_bar_config(disable=_lowercase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case_ : Any = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Optional[int] = pipeline(
"""teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=_lowercase , output_type="""np""" , )
snake_case_ : Union[str, Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 711 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=9_9 , _lowercase=3_2 , _lowercase=5 , _lowercase=4 , _lowercase=3_7 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=1_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=4 , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = parent
snake_case_ : Dict = batch_size
snake_case_ : Any = seq_length
snake_case_ : Tuple = is_training
snake_case_ : Dict = use_attention_mask
snake_case_ : int = use_token_type_ids
snake_case_ : List[Any] = use_labels
snake_case_ : List[str] = vocab_size
snake_case_ : str = hidden_size
snake_case_ : Optional[Any] = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : Any = intermediate_size
snake_case_ : Optional[Any] = hidden_act
snake_case_ : Tuple = hidden_dropout_prob
snake_case_ : List[Any] = attention_probs_dropout_prob
snake_case_ : int = max_position_embeddings
snake_case_ : Dict = type_vocab_size
snake_case_ : Dict = type_sequence_label_size
snake_case_ : Optional[Any] = initializer_range
snake_case_ : Tuple = num_choices
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Tuple = None
if self.use_attention_mask:
snake_case_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : List[str] = None
if self.use_token_type_ids:
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Tuple = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Any = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = config_and_inputs
snake_case_ : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Tuple = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = config_and_inputs
snake_case_ : Union[str, Any] = True
snake_case_ : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
snake_case_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = True
_lowerCamelCase = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Any = FlaxRobertaPreLayerNormModelTester(self )
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case_ : Tuple = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : List[str] = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
snake_case_ : str = model(_lowercase )[0]
snake_case_ : int = [1, 1_1, 5_0_2_6_5]
self.assertEqual(list(output.shape ) , _lowercase )
# compare the actual values for a slice.
snake_case_ : Tuple = np.array(
[[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Any = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : Dict = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
snake_case_ : Any = model(_lowercase )[0]
# compare the actual values for a slice.
snake_case_ : Optional[Any] = np.array(
[[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
| 21 | 0 |
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass( frequency : int , samplerate : int , q_factor : float = 1 / sqrt(2 ) ):
    '''Create a low-pass biquad filter (Audio EQ Cookbook coefficients).'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt
def make_highpass( frequency : int , samplerate : int , q_factor : float = 1 / sqrt(2 ) ):
    '''Create a high-pass biquad filter.'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt
def make_bandpass( frequency : int , samplerate : int , q_factor : float = 1 / sqrt(2 ) ):
    '''Create a band-pass biquad filter.'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_allpass( frequency : int , samplerate : int , q_factor : float = 1 / sqrt(2 ) ):
    '''Create an all-pass biquad filter.'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([b2, b1, b0] , [b0, b1, b2] )
    return filt
def make_peak( frequency : int , samplerate : int , gain_db : float , q_factor : float = 1 / sqrt(2 ) , ):
    '''Create a peaking EQ biquad filter.'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 1_0 ** (gain_db / 4_0)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_lowshelf( frequency : int , samplerate : int , gain_db : float , q_factor : float = 1 / sqrt(2 ) , ):
    '''Create a low-shelf biquad filter.'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 1_0 ** (gain_db / 4_0)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_highshelf( frequency : int , samplerate : int , gain_db : float , q_factor : float = 1 / sqrt(2 ) , ):
    '''Create a high-shelf biquad filter.'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 1_0 ** (gain_db / 4_0)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
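# Hedged usage sketch (not part of the original module). It assumes the
# IIRFilter class imported above exposes a per-sample process() method.
#
#     filt = make_lowpass(1_000, 48_000)
#     filtered = [filt.process(sample) for sample in (0.0, 1.0, 0.5, -0.5)]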
| 712 |
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
args = parser.parse_args()
device = '''cpu'''
prompt = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
model_id = '''path-to-your-trained-model'''
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'''generator''': generator}
if args.steps is not None:
    generate_kwargs['''num_inference_steps'''] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
| 21 | 0 |
"""simple docstring"""
class Node:
    """simple docstring"""
    def __init__( self , name , val ) -> List[Any]:
        '''simple docstring'''
        self.name = name
        self.val = val
    def __str__( self ) -> Dict:
        '''simple docstring'''
        return f'{self.__class__.__name__}({self.name}, {self.val})'
    def __lt__( self , other ) -> Tuple:
        '''simple docstring'''
        return self.val < other.val
class MinHeap:
    """simple docstring"""
    def __init__( self , array ) -> Optional[int]:
        '''simple docstring'''
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array )
    def __getitem__( self , key ) -> List[Any]:
        '''simple docstring'''
        return self.get_value(key )
    def get_parent_idx( self , idx ) -> Optional[int]:
        '''simple docstring'''
        return (idx - 1) // 2
    def get_left_child_idx( self , idx ) -> Any:
        '''simple docstring'''
        return idx * 2 + 1
    def get_right_child_idx( self , idx ) -> List[Any]:
        '''simple docstring'''
        return idx * 2 + 2
    def get_value( self , key ) -> Optional[int]:
        '''simple docstring'''
        return self.heap_dict[key]
    def build_heap( self , array ) -> Optional[int]:
        '''simple docstring'''
        last_idx = len(array ) - 1
        start_from = self.get_parent_idx(last_idx )
        for idx, i in enumerate(array ):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from , -1 , -1 ):
            self.sift_down(i , array )
        return array
    def sift_down( self , idx , array ) -> Tuple:
        '''simple docstring'''
        while True:
            l = self.get_left_child_idx(idx )  # noqa: E741
            r = self.get_right_child_idx(idx )
            smallest = idx
            if l < len(array ) and array[l] < array[idx]:
                smallest = l
            if r < len(array ) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx] , array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break
    def sift_up( self , idx ) -> str:
        '''simple docstring'''
        p = self.get_parent_idx(idx )
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p] , self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]] , self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx )
    def peek( self ) -> str:
        '''simple docstring'''
        return self.heap[0]
    def remove( self ) -> Any:
        '''simple docstring'''
        self.heap[0] , self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]] , self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0 , self.heap )
        return x
    def insert( self , node ) -> int:
        '''simple docstring'''
        self.heap.append(node )
        self.idx_of_element[node] = len(self.heap ) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap ) - 1 )
    def is_empty( self ) -> int:
        '''simple docstring'''
        return len(self.heap ) == 0
    def decrease_key( self , node , new_value ) -> Optional[int]:
        '''simple docstring'''
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node] )
r = Node('''R''', -1)
b = Node('''B''', 6)
a = Node('''A''', 3)
x = Node('''X''', 1)
e = Node('''E''', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
print(i)
print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713 |
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = RoFormerTokenizer
_lowerCamelCase = RoFormerTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = True
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().setUp()
def UpperCAmelCase__ ( self , **_lowercase ) -> str:
'''simple docstring'''
return self.tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **_lowercase )
def UpperCAmelCase__ ( self , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **_lowercase )
    def get_chinese_input_output_texts( self ):
        '''simple docstring'''
        input_text = """永和服装饰品有限公司,今天天气非常好"""
        output_text = """永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"""
        return input_text, output_text
    def UpperCAmelCase__ ( self ) -> str:
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        input_text , output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text )
        self.assertListEqual(tokens , output_text.split() )
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , exp_tokens )
    def UpperCAmelCase__ ( self ) -> Any:
        '''simple docstring'''
        tokenizer = self.get_rust_tokenizer()
        input_text , output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text )
        self.assertListEqual(tokens , output_text.split() )
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , exp_tokens )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
| 21 | 0 |
"""simple docstring"""
import requests
__lowerCAmelCase : Optional[int] = '''https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='''
def fetch_bbc_news( bbc_news_api_key ):
    '''simple docstring'''
    # fetch the page and parse the JSON payload
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key ).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["""articles"""] , 1 ):
        print(F'{i}.) {article["title"]}' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='''<Your BBC News API key goes here>''')
| 714 |
"""simple docstring"""
def miller_rabin( n : int , allow_probable : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 1_0 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
    bounds = [
2_0_4_7,
1_3_7_3_6_5_3,
2_5_3_2_6_0_0_1,
3_2_1_5_0_3_1_7_5_1,
2_1_5_2_3_0_2_8_9_8_7_4_7,
3_4_7_4_7_4_9_6_6_0_3_8_3,
3_4_1_5_5_0_0_7_1_7_2_8_3_2_1,
1,
3_8_2_5_1_2_3_0_5_6_5_4_6_4_1_3_0_5_1,
1,
1,
3_1_8_6_6_5_8_5_7_8_3_4_0_3_1_1_5_1_1_6_7_4_6_1,
3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1,
]
    primes = [2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9, 3_1, 3_7, 4_1]
    for idx, _p in enumerate(bounds , 1 ):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d , s = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
    for prime in plist:
        pr = False
        for r in range(s ):
            m = pow(prime , d * 2**r , n )
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
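# Worked example of the decomposition above (illustrative, not from the original
# file): for n = 221, n - 1 = 220 = 55 * 2**2, so d = 55 and s = 2. A base then
# fails to witness compositeness only if pow(base, d * 2**r, n) is 1 at r == 0
# or n - 1 for some r < s.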
def test_miller_rabin( ):
'''simple docstring'''
assert not miller_rabin(5_6_1 )
assert miller_rabin(5_6_3 )
# 2047
assert not miller_rabin(8_3_8_2_0_1 )
assert miller_rabin(8_3_8_2_0_7 )
# 1_373_653
assert not miller_rabin(1_7_3_1_6_0_0_1 )
assert miller_rabin(1_7_3_1_6_0_1_7 )
# 25_326_001
assert not miller_rabin(3_0_7_8_3_8_6_6_4_1 )
assert miller_rabin(3_0_7_8_3_8_6_6_5_3 )
# 3_215_031_751
assert not miller_rabin(1_7_1_3_0_4_5_5_7_4_8_0_1 )
assert miller_rabin(1_7_1_3_0_4_5_5_7_4_8_1_9 )
# 2_152_302_898_747
assert not miller_rabin(2_7_7_9_7_9_9_7_2_8_3_0_7 )
assert miller_rabin(2_7_7_9_7_9_9_7_2_8_3_2_7 )
# 3_474_749_660_383
assert not miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_4_4_1 )
assert miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_5_2_7 )
# 341_550_071_728_321
assert not miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_5_1 )
assert miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_9_1 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_8_6_7 )
assert miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_9_5_1 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_3_3 )
assert miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_5_9 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 21 | 0 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : List[Any] = (1 + 2_4 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def __lowerCAmelCase ( __UpperCamelCase : int = 5_0_0_0 ):
'''simple docstring'''
snake_case_ : str = [(i * (3 * i - 1)) // 2 for i in range(1 , __UpperCamelCase )]
for i, pentagonal_i in enumerate(__UpperCamelCase ):
for j in range(__UpperCamelCase , len(__UpperCamelCase ) ):
snake_case_ : Optional[int] = pentagonal_nums[j]
snake_case_ : List[Any] = pentagonal_i + pentagonal_j
snake_case_ : Optional[Any] = pentagonal_j - pentagonal_i
if is_pentagonal(__UpperCamelCase ) and is_pentagonal(__UpperCamelCase ):
return b
return -1
if __name__ == "__main__":
print(F'''{solution() = }''')
| 715 |
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
def is_in_circle(__UpperCamelCase : float , __UpperCamelCase : float ) -> bool:
snake_case_ : Dict = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
snake_case_ : Tuple = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(__UpperCamelCase ) )
# The ratio of the area for circle to square is pi/4.
snake_case_ : Union[str, Any] = proportion * 4
print(F'The estimated value of pi is {pi_estimate}' )
print(F'The numpy value of pi is {pi}' )
print(F'The total error is {abs(pi - pi_estimate )}' )
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Callable[[float], float] , __UpperCamelCase : float = 0.0 , __UpperCamelCase : float = 1.0 , ):
'''simple docstring'''
return mean(
function_to_integrate(uniform(__UpperCamelCase , __UpperCamelCase ) ) for _ in range(__UpperCamelCase ) ) * (max_value - min_value)
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : float = 0.0 , __UpperCamelCase : float = 1.0 ):
'''simple docstring'''
def identity_function(__UpperCamelCase : float ) -> float:
return x
snake_case_ : int = area_under_curve_estimator(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ : str = (max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {expected_value}' )
print(F'Total error is {abs(estimated_value - expected_value )}' )
print("""******************""" )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
def function_to_integrate(__UpperCamelCase : float ) -> float:
return sqrt(4.0 - x * x )
snake_case_ : List[Any] = area_under_curve_estimator(
__UpperCamelCase , __UpperCamelCase , 0.0 , 2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {pi}' )
print(F'Total error is {abs(estimated_value - pi )}' )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 0 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[int] = """https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"""
snake_case_ : Optional[Any] = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw ).convert("""RGB""" )
return image
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Tuple = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.weight', F'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.bias', F'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.weight', F'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.bias', F'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.qkv.weight', F'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.weight', F'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.bias', F'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.weight', F'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.bias', F'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.weight', F'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.bias', F'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.embeddings.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.embeddings.layernorm.bias""") )
# fmt: on
return rename_keys
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = dct.pop(__UpperCamelCase )
snake_case_ : Optional[int] = val
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : str ):
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
snake_case_ : Union[str, Any] = state_dict.pop(F'visual_encoder.blocks.{i}.attn.q_bias' )
snake_case_ : Optional[Any] = state_dict.pop(F'visual_encoder.blocks.{i}.attn.v_bias' )
# next, set bias in the state dict
snake_case_ : Union[str, Any] = torch.cat((q_bias, torch.zeros_like(__UpperCamelCase , requires_grad=__UpperCamelCase ), v_bias) )
snake_case_ : Optional[Any] = qkv_bias
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Dict = 3_6_4 if """coco""" in model_name else 2_2_4
snake_case_ : str = InstructBlipVisionConfig(image_size=__UpperCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
snake_case_ : Optional[int] = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
snake_case_ : int = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
snake_case_ : int = LlamaConfig.from_pretrained("""decapoda-research/llama-7b-hf""" , vocab_size=3_2_0_0_1 ).to_dict()
elif "vicuna-13b" in model_name:
snake_case_ : Union[str, Any] = LlamaConfig.from_pretrained("""decapoda-research/llama-13b-hf""" , vocab_size=3_2_0_0_1 ).to_dict()
else:
raise ValueError("""Model name not supported""" )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
snake_case_ : str = InstructBlipQFormerConfig(vocab_size=3_0_5_2_3 ).to_dict()
snake_case_ : List[str] = InstructBlipConfig(vision_config=__UpperCamelCase , text_config=__UpperCamelCase , qformer_config=__UpperCamelCase )
return config, image_size
@torch.no_grad()
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Optional[Any]=False ):
'''simple docstring'''
snake_case_ : Optional[int] = AutoTokenizer.from_pretrained("""bert-base-uncased""" , truncation_side="""left""" )
qformer_tokenizer.add_special_tokens({"""bos_token""": """[DEC]"""} )
if "t5" in model_name:
snake_case_ : Tuple = TaTokenizerFast.from_pretrained("""google/flan-t5-xl""" , truncation_side="""left""" )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
snake_case_ : Any = LlamaTokenizerFast.from_pretrained(
"""huggyllama/llama-7b""" , truncation_side="""left""" , bos_token="""</s>""" , unk_token="""</s>""" )
tokenizer.add_special_tokens({"""pad_token""": """[PAD]"""} )
snake_case_ : Union[str, Any] = get_blipa_config(__UpperCamelCase )
snake_case_ : Optional[Any] = InstructBlipForConditionalGeneration(__UpperCamelCase ).eval()
snake_case_ : Tuple = {
"""instructblip-vicuna-7b""": ("""blip2_vicuna_instruct""", """vicuna7b"""),
"""instructblip-vicuna-13b""": ("""blip2_vicuna_instruct""", """vicuna13b"""),
"""instructblip-flan-t5-xl""": ("""blip2_t5_instruct""", """flant5xl"""),
"""instructblip-flan-t5-xxl""": ("""blip2_t5_instruct""", """flant5xxl"""),
}
snake_case_ : int = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
snake_case_ : List[Any] = """cuda:1""" if torch.cuda.is_available() else """cpu"""
snake_case_ : Dict = """cuda:2""" if torch.cuda.is_available() else """cpu"""
snake_case_ : Union[str, Any] = load_model_and_preprocess(
name=__UpperCamelCase , model_type=__UpperCamelCase , is_eval=__UpperCamelCase , device=__UpperCamelCase )
original_model.eval()
print("""Done!""" )
# update state dict keys
snake_case_ : Any = original_model.state_dict()
snake_case_ : List[str] = create_rename_keys(__UpperCamelCase )
for src, dest in rename_keys:
rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
snake_case_ : int = state_dict.pop(__UpperCamelCase )
if key.startswith("""Qformer.bert""" ):
snake_case_ : Any = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
snake_case_ : Tuple = key.replace("""self""" , """attention""" )
if "llm_proj" in key:
snake_case_ : Dict = key.replace("""llm_proj""" , """language_projection""" )
if "t5_proj" in key:
snake_case_ : Dict = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""llm_model""" ):
snake_case_ : Union[str, Any] = key.replace("""llm_model""" , """language_model""" )
if key.startswith("""t5""" ):
snake_case_ : List[str] = key.replace("""t5""" , """language""" )
snake_case_ : Optional[Any] = val
# read in qv biases
read_in_q_v_bias(__UpperCamelCase , __UpperCamelCase )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
snake_case_ : Dict = load_demo_image()
snake_case_ : Optional[Any] = """What is unusual about this image?"""
# create processor
snake_case_ : int = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=__UpperCamelCase , image_std=__UpperCamelCase )
snake_case_ : List[str] = InstructBlipProcessor(
image_processor=__UpperCamelCase , tokenizer=__UpperCamelCase , qformer_tokenizer=__UpperCamelCase , )
snake_case_ : int = processor(images=__UpperCamelCase , text=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# make sure processor creates exact same pixel values
snake_case_ : Tuple = vis_processors["""eval"""](__UpperCamelCase ).unsqueeze(0 ).to(__UpperCamelCase )
snake_case_ : Dict = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , __UpperCamelCase )
original_model.to(__UpperCamelCase )
hf_model.to(__UpperCamelCase )
with torch.no_grad():
if "vicuna" in model_name:
snake_case_ : Any = original_model({"""image""": original_pixel_values, """text_input""": [prompt]} ).logits
snake_case_ : Dict = hf_model(**__UpperCamelCase ).logits
else:
snake_case_ : Union[str, Any] = original_model(
{"""image""": original_pixel_values, """text_input""": [prompt], """text_output""": ["""\n"""]} ).logits
snake_case_ : List[Any] = tokenizer("""\n""" , return_tensors="""pt""" ).input_ids.to(__UpperCamelCase )
snake_case_ : int = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -1_0_0 )
snake_case_ : int = hf_model(**__UpperCamelCase , labels=__UpperCamelCase ).logits
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
snake_case_ : str = 1E-4 if """vicuna""" in model_name else 1E-5
assert torch.allclose(original_logits.to(logits.device ) , __UpperCamelCase , atol=__UpperCamelCase )
print("""Looks ok!""" )
print("""Generating with original model...""" )
snake_case_ : int = original_model.generate({"""image""": original_pixel_values, """prompt""": prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print("""Generating with HF model...""" )
snake_case_ : str = hf_model.generate(
**__UpperCamelCase , do_sample=__UpperCamelCase , num_beams=5 , max_length=2_5_6 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
snake_case_ : Any = 2
print("""Original generation:""" , __UpperCamelCase )
snake_case_ : List[Any] = processor.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
snake_case_ : str = [text.strip() for text in output_text]
print("""HF generation:""" , __UpperCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__UpperCamelCase )
hf_model.save_pretrained(__UpperCamelCase )
if push_to_hub:
processor.push_to_hub(F'Salesforce/{model_name}' )
hf_model.push_to_hub(F'Salesforce/{model_name}' )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
__lowerCAmelCase : Optional[Any] = [
'''instructblip-vicuna-7b''',
'''instructblip-vicuna-13b''',
'''instructblip-flan-t5-xl''',
'''instructblip-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''instructblip-flan-t5-xl''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
__lowerCAmelCase : str = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 716 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = nn.functional.normalize(__UpperCamelCase )
snake_case_ : Tuple = nn.functional.normalize(__UpperCamelCase )
return torch.mm(__UpperCamelCase , normalized_text_embeds.t() )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = CLIPConfig
_lowerCamelCase = ['''CLIPEncoderLayer''']
def __init__( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Tuple = CLIPVisionModel(config.vision_config )
snake_case_ : int = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_lowercase )
snake_case_ : Optional[Any] = nn.Parameter(torch.ones(1_7 , config.projection_dim ) , requires_grad=_lowercase )
snake_case_ : Dict = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=_lowercase )
snake_case_ : Any = nn.Parameter(torch.ones(1_7 ) , requires_grad=_lowercase )
snake_case_ : List[str] = nn.Parameter(torch.ones(3 ) , requires_grad=_lowercase )
@torch.no_grad()
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
snake_case_ : int = self.vision_model(_lowercase )[1] # pooled_output
snake_case_ : str = self.visual_projection(_lowercase )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
snake_case_ : Dict = cosine_distance(_lowercase , self.special_care_embeds ).cpu().float().numpy()
snake_case_ : List[str] = cosine_distance(_lowercase , self.concept_embeds ).cpu().float().numpy()
snake_case_ : Any = []
snake_case_ : Any = image_embeds.shape[0]
for i in range(_lowercase ):
snake_case_ : List[Any] = {"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []}
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign images
snake_case_ : int = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
snake_case_ : List[str] = special_cos_dist[i][concept_idx]
snake_case_ : Union[str, Any] = self.special_care_embeds_weights[concept_idx].item()
snake_case_ : Tuple = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} )
snake_case_ : Dict = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
snake_case_ : int = cos_dist[i][concept_idx]
snake_case_ : List[Any] = self.concept_embeds_weights[concept_idx].item()
snake_case_ : List[str] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(_lowercase )
result.append(_lowercase )
snake_case_ : Union[str, Any] = [len(res["""bad_concepts"""] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = self.vision_model(_lowercase )[1] # pooled_output
snake_case_ : List[str] = self.visual_projection(_lowercase )
snake_case_ : str = cosine_distance(_lowercase , self.special_care_embeds )
snake_case_ : Optional[int] = cosine_distance(_lowercase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
snake_case_ : Tuple = 0.0
snake_case_ : List[Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
snake_case_ : str = torch.any(special_scores > 0 , dim=1 )
snake_case_ : List[str] = special_care * 0.01
snake_case_ : Optional[int] = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
snake_case_ : Optional[Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
snake_case_ : str = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
| 21 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCAmelCase : str = ['''model.decoder.embed_positions.weights''']
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
if "emb" in name:
snake_case_ : int = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
snake_case_ : Union[str, Any] = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
snake_case_ : Union[str, Any] = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
snake_case_ : List[str] = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
snake_case_ : Any = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
snake_case_ : Optional[Any] = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
snake_case_ : Union[str, Any] = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
snake_case_ : Optional[int] = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
snake_case_ : List[Any] = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
snake_case_ : Optional[Any] = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
snake_case_ : List[str] = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
def __lowerCAmelCase ( __UpperCamelCase : OrderedDict , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : List[str] = list(state_dict.keys() )
snake_case_ : int = {}
for key in keys:
snake_case_ : Optional[int] = state_dict.pop(__UpperCamelCase )
snake_case_ : int = rename_keys(__UpperCamelCase )
if "in_proj_weight" in key:
# split fused qkv proj
snake_case_ : int = val[:hidden_size, :]
snake_case_ : int = val[hidden_size : 2 * hidden_size, :]
snake_case_ : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
snake_case_ : Union[str, Any] = val
else:
snake_case_ : Optional[Any] = val
return state_dict, enc_dec_proj_state_dict
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
if checkpoint == "small":
# default config values
snake_case_ : str = 1_0_2_4
snake_case_ : List[str] = 2_4
snake_case_ : List[Any] = 1_6
elif checkpoint == "medium":
snake_case_ : List[str] = 1_5_3_6
snake_case_ : str = 4_8
snake_case_ : List[Any] = 2_4
elif checkpoint == "large":
snake_case_ : Any = 2_0_4_8
snake_case_ : Dict = 4_8
snake_case_ : str = 3_2
else:
raise ValueError(F'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' )
snake_case_ : Dict = MusicgenDecoderConfig(
hidden_size=__UpperCamelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=__UpperCamelCase , num_attention_heads=__UpperCamelCase , )
return config
@torch.no_grad()
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : Tuple=None , __UpperCamelCase : str=None , __UpperCamelCase : Optional[Any]="cpu" ):
'''simple docstring'''
snake_case_ : Any = MusicGen.get_pretrained(__UpperCamelCase , device=__UpperCamelCase )
snake_case_ : List[Any] = decoder_config_from_checkpoint(__UpperCamelCase )
snake_case_ : Optional[Any] = fairseq_model.lm.state_dict()
snake_case_ : int = rename_state_dict(
__UpperCamelCase , hidden_size=decoder_config.hidden_size )
snake_case_ : Union[str, Any] = TaEncoderModel.from_pretrained("""t5-base""" )
snake_case_ : List[str] = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
snake_case_ : Dict = MusicgenForCausalLM(__UpperCamelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
snake_case_ : List[Any] = decoder.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(__UpperCamelCase )
if len(__UpperCamelCase ) > 0:
raise ValueError(F'Missing key(s) in state_dict: {missing_keys}' )
if len(__UpperCamelCase ) > 0:
raise ValueError(F'Unexpected key(s) in state_dict: {unexpected_keys}' )
# init the composite model
snake_case_ : Optional[int] = MusicgenForConditionalGeneration(text_encoder=__UpperCamelCase , audio_encoder=__UpperCamelCase , decoder=__UpperCamelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__UpperCamelCase )
# check we can do a forward pass
snake_case_ : Any = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
snake_case_ : Any = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
snake_case_ : Any = model(input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase ).logits
if logits.shape != (8, 1, 2_0_4_8):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
snake_case_ : List[Any] = AutoTokenizer.from_pretrained("""t5-base""" )
snake_case_ : Dict = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
snake_case_ : Dict = MusicgenProcessor(feature_extractor=__UpperCamelCase , tokenizer=__UpperCamelCase )
# set the appropriate bos/pad token ids
snake_case_ : Union[str, Any] = 2_0_4_8
snake_case_ : Optional[int] = 2_0_4_8
# set other default generation config params
snake_case_ : Dict = int(3_0 * audio_encoder.config.frame_rate )
snake_case_ : Optional[Any] = True
snake_case_ : Any = 3.0
if pytorch_dump_folder is not None:
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
logger.info(F'Saving model {checkpoint} to {pytorch_dump_folder}' )
model.save_pretrained(__UpperCamelCase )
processor.save_pretrained(__UpperCamelCase )
if repo_id:
logger.info(F'Pushing model {checkpoint} to {repo_id}' )
model.push_to_hub(__UpperCamelCase )
processor.push_to_hub(__UpperCamelCase )
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
__lowerCAmelCase : List[str] = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 717 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : List[str] = []
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
F'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
F'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
F'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
F'stage{idx}.patch_embed.norm.bias',
) )
return embed
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : str = []
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
F'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
F'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', F'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', F'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', F'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', F'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def __lowerCAmelCase ( __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : int = []
token.append((F'cvt.encoder.stages.{idx}.cls_token', """stage2.cls_token""") )
return token
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Union[str, Any] = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = """imagenet-1k-id2label.json"""
snake_case_ : Optional[Any] = 1_0_0_0
snake_case_ : Any = """huggingface/label-files"""
snake_case_ : Tuple = num_labels
snake_case_ : Dict = json.load(open(cached_download(hf_hub_url(__UpperCamelCase , __UpperCamelCase , repo_type="""dataset""" ) ) , """r""" ) )
snake_case_ : str = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
snake_case_ : List[str] = idalabel
snake_case_ : Any = {v: k for k, v in idalabel.items()}
snake_case_ : Dict = CvtConfig(num_labels=__UpperCamelCase , idalabel=__UpperCamelCase , labelaid=__UpperCamelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "13":
snake_case_ : Any = [1, 2, 1_0]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "21":
snake_case_ : Any = [1, 4, 1_6]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
else:
snake_case_ : Optional[int] = [2, 2, 2_0]
snake_case_ : str = [3, 1_2, 1_6]
snake_case_ : Any = [1_9_2, 7_6_8, 1_0_2_4]
snake_case_ : Union[str, Any] = CvtForImageClassification(__UpperCamelCase )
snake_case_ : str = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
snake_case_ : List[Any] = image_size
snake_case_ : str = torch.load(__UpperCamelCase , map_location=torch.device("""cpu""" ) )
snake_case_ : Any = OrderedDict()
snake_case_ : Tuple = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
snake_case_ : Optional[Any] = list_of_state_dict + cls_token(__UpperCamelCase )
snake_case_ : str = list_of_state_dict + embeddings(__UpperCamelCase )
for cnt in range(config.depth[idx] ):
snake_case_ : List[str] = list_of_state_dict + attention(__UpperCamelCase , __UpperCamelCase )
snake_case_ : str = list_of_state_dict + final()
for gg in list_of_state_dict:
print(__UpperCamelCase )
for i in range(len(__UpperCamelCase ) ):
snake_case_ : Union[str, Any] = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
image_processor.save_pretrained(__UpperCamelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Input Image Size''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__lowerCAmelCase : Dict = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 21 | 0 |
import cmath
import math
def __lowerCAmelCase ( __UpperCamelCase : float , __UpperCamelCase : float , __UpperCamelCase : float , __UpperCamelCase : float ):
'''simple docstring'''
snake_case_ : List[Any] = math.radians(__UpperCamelCase )
snake_case_ : Union[str, Any] = math.radians(__UpperCamelCase )
# Convert voltage and current to rectangular form
snake_case_ : Union[str, Any] = cmath.rect(__UpperCamelCase , __UpperCamelCase )
snake_case_ : str = cmath.rect(__UpperCamelCase , __UpperCamelCase )
# Calculate apparent power
return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = MgpstrTokenizer
_lowerCamelCase = False
_lowerCamelCase = {}
_lowerCamelCase = False
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
super().setUp()
# fmt: off
snake_case_ : Optional[Any] = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
snake_case_ : str = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowercase ) + """\n""" )
def UpperCAmelCase__ ( self , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = """tester"""
snake_case_ : Tuple = """tester"""
return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Dict = self.get_tokenizers(do_lower_case=_lowercase )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
snake_case_ : Optional[Any] = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
snake_case_ : Union[str, Any] = tokenizer.encode([special_token] , add_special_tokens=_lowercase )
self.assertEqual(len(_lowercase ) , 1 )
snake_case_ : List[Any] = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
self.assertTrue(special_token not in decoded )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
snake_case_ , snake_case_ : Union[str, Any] = self.get_input_output_texts(_lowercase )
snake_case_ : List[Any] = tokenizer.tokenize(_lowercase )
snake_case_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(_lowercase )
snake_case_ : Union[str, Any] = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
snake_case_ : List[str] = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertNotEqual(len(_lowercase ) , 0 )
snake_case_ : str = tokenizer.decode(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(text_a.replace(""" """ , """""" ) , _lowercase )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
pass
| 21 | 0 |
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowercase ) -> Tuple:
'''simple docstring'''
snake_case_ : Any = str(id_ )
snake_case_ : Optional[int] = None
snake_case_ : Dict = None
snake_case_ : Tuple = []
snake_case_ : int = {} # {vertex:distance}
def __lt__( self , _lowercase ) -> str:
'''simple docstring'''
return self.key < other.key
def __repr__( self ) -> int:
'''simple docstring'''
return self.id
def UpperCAmelCase__ ( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
self.neighbors.append(_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> str:
'''simple docstring'''
snake_case_ : Dict = weight
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , __UpperCamelCase )
graph[b - 1].add_edge(graph[a - 1] , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : list , __UpperCamelCase : Vertex ):
'''simple docstring'''
snake_case_ : Optional[int] = []
for u in graph:
snake_case_ : Optional[Any] = math.inf
snake_case_ : Optional[int] = None
snake_case_ : Tuple = 0
snake_case_ : Tuple = graph[:]
while q:
snake_case_ : Union[str, Any] = min(__UpperCamelCase )
q.remove(__UpperCamelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
snake_case_ : Optional[int] = u
snake_case_ : int = u.edges[v.id]
for i in range(1 , len(__UpperCamelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def __lowerCAmelCase ( __UpperCamelCase : list , __UpperCamelCase : Vertex ):
'''simple docstring'''
for u in graph:
snake_case_ : Optional[Any] = math.inf
snake_case_ : str = None
snake_case_ : Optional[int] = 0
snake_case_ : int = list(__UpperCamelCase )
hq.heapify(__UpperCamelCase )
while h:
snake_case_ : Optional[Any] = hq.heappop(__UpperCamelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
snake_case_ : List[Any] = u
snake_case_ : Dict = u.edges[v.id]
hq.heapify(__UpperCamelCase )
for i in range(1 , len(__UpperCamelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def __lowerCAmelCase ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=9_9 , _lowercase=3_2 , _lowercase=5 , _lowercase=4 , _lowercase=3_7 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=1_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=4 , ) -> List[str]:
'''simple docstring'''
snake_case_ : Tuple = parent
snake_case_ : List[str] = batch_size
snake_case_ : int = seq_length
snake_case_ : List[Any] = is_training
snake_case_ : Optional[int] = use_attention_mask
snake_case_ : Optional[Any] = use_token_type_ids
snake_case_ : Union[str, Any] = use_labels
snake_case_ : str = vocab_size
snake_case_ : List[str] = hidden_size
snake_case_ : List[str] = num_hidden_layers
snake_case_ : int = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : List[str] = max_position_embeddings
snake_case_ : Union[str, Any] = type_vocab_size
snake_case_ : str = type_sequence_label_size
snake_case_ : Dict = initializer_range
snake_case_ : str = num_choices
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Tuple = None
if self.use_attention_mask:
snake_case_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : Optional[int] = None
if self.use_token_type_ids:
snake_case_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Union[str, Any] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : str = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = config_and_inputs
snake_case_ : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = True
_lowerCamelCase = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = FlaxRoFormerModelTester(self )
@slow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case_ : Tuple = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=_lowercase )
snake_case_ : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
snake_case_ : Tuple = jnp.array([[0, 1, 2, 3, 4, 5]] )
snake_case_ : Dict = model(_lowercase )[0]
snake_case_ : Optional[int] = 5_0_0_0_0
snake_case_ : Union[str, Any] = (1, 6, vocab_size)
self.assertEqual(output.shape , _lowercase )
snake_case_ : Dict = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
| 21 | 0 |
"""simple docstring"""
__lowerCAmelCase : Any = range(2, 20 + 1)
__lowerCAmelCase : Optional[int] = [10**k for k in range(ks[-1] + 1)]
__lowerCAmelCase : dict[int, dict[int, list[list[int]]]] = {}
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Optional[int] = sum(a_i[j] for j in range(__UpperCamelCase , len(__UpperCamelCase ) ) )
snake_case_ : str = sum(a_i[j] * base[j] for j in range(min(len(__UpperCamelCase ) , __UpperCamelCase ) ) )
snake_case_ : str = 0, 0
snake_case_ : int = n - i
snake_case_ : Any = memo.get(__UpperCamelCase )
if sub_memo is not None:
snake_case_ : Any = sub_memo.get(__UpperCamelCase )
if jumps is not None and len(__UpperCamelCase ) > 0:
# find and make the largest jump without going over
snake_case_ : Optional[int] = -1
for _k in range(len(__UpperCamelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
snake_case_ : Union[str, Any] = _k
break
if max_jump >= 0:
snake_case_ : str = jumps[max_jump]
# since the difference between jumps is cached, add c
snake_case_ : List[str] = diff + c
for j in range(min(__UpperCamelCase , len(__UpperCamelCase ) ) ):
snake_case_ : List[str] = divmod(__UpperCamelCase , 1_0 )
if new_c > 0:
add(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
snake_case_ : Tuple = []
else:
snake_case_ : List[Any] = {c: []}
snake_case_ : Tuple = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
snake_case_ : Tuple = next_term(__UpperCamelCase , k - 1 , i + dn , __UpperCamelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
snake_case_ : List[str] = compute(__UpperCamelCase , __UpperCamelCase , i + dn , __UpperCamelCase )
diff += _diff
dn += terms_jumped
snake_case_ : Optional[int] = sub_memo[c]
# keep jumps sorted by # of terms skipped
snake_case_ : Union[str, Any] = 0
while j < len(__UpperCamelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(__UpperCamelCase , (diff, dn, k) )
return (diff, dn)
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Any , __UpperCamelCase : Any ):
'''simple docstring'''
if i >= n:
return 0, i
if k > len(__UpperCamelCase ):
a_i.extend([0 for _ in range(k - len(__UpperCamelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
snake_case_ : str = i
snake_case_ : List[str] = 0, 0, 0
for j in range(len(__UpperCamelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
snake_case_ : Union[str, Any] = ds_c + ds_b
diff += addend
snake_case_ : Tuple = 0
for j in range(__UpperCamelCase ):
snake_case_ : Any = a_i[j] + addend
snake_case_ : Optional[int] = divmod(__UpperCamelCase , 1_0 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return diff, i - start_i
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
for j in range(__UpperCamelCase , len(__UpperCamelCase ) ):
snake_case_ : Tuple = digits[j] + addend
if s >= 1_0:
snake_case_ : Optional[Any] = divmod(__UpperCamelCase , 1_0 )
snake_case_ : Dict = addend // 1_0 + quotient
else:
snake_case_ : Tuple = s
snake_case_ : Union[str, Any] = addend // 1_0
if addend == 0:
break
while addend > 0:
snake_case_ : Dict = divmod(__UpperCamelCase , 1_0 )
digits.append(__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : int = 1_0**1_5 ):
'''simple docstring'''
snake_case_ : List[str] = [1]
snake_case_ : Any = 1
snake_case_ : str = 0
while True:
snake_case_ : int = next_term(__UpperCamelCase , 2_0 , i + dn , __UpperCamelCase )
dn += terms_jumped
if dn == n - i:
break
snake_case_ : Tuple = 0
for j in range(len(__UpperCamelCase ) ):
a_n += digits[j] * 1_0**j
return a_n
if __name__ == "__main__":
print(F'''{solution() = }''')
| 720 |
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : int = 1_0
snake_case_ : Any = datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
snake_case_ : Tuple = datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [9_7], """text""": ["""1976"""]}] * 1_0,
"""id""": list(range(__UpperCamelCase ) ),
} , features=__UpperCamelCase , )
return dataset
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : str = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=__UpperCamelCase )
return filename
# FILE_CONTENT + files
__lowerCAmelCase : List[Any] = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
snake_case_ : Optional[Any] = FILE_CONTENT
with open(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase )
return filename
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
    import bz2
snake_case_ : Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
snake_case_ : Any = bytes(__UpperCamelCase , """utf-8""" )
    with bz2.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
import gzip
snake_case_ : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
snake_case_ : List[Any] = bytes(__UpperCamelCase , """utf-8""" )
with gzip.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
snake_case_ : Optional[Any] = bytes(__UpperCamelCase , """utf-8""" )
    with lz4.frame.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
    with py7zr.SevenZipFile(__UpperCamelCase , """w""" ) as archive:
archive.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple ):
'''simple docstring'''
import tarfile
snake_case_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
import lzma
snake_case_ : str = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
snake_case_ : str = bytes(__UpperCamelCase , """utf-8""" )
with lzma.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
import zipfile
snake_case_ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
snake_case_ : Tuple = bytes(__UpperCamelCase , """utf-8""" )
with zstd.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Dict = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
snake_case_ : List[str] = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase )
return filename
__lowerCAmelCase : List[str] = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
__lowerCAmelCase : Tuple = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
__lowerCAmelCase : int = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
__lowerCAmelCase : int = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
__lowerCAmelCase : Any = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Optional[int] = datasets.Dataset.from_dict(__UpperCamelCase )
snake_case_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
    with contextlib.closing(sqlite3.connect(__UpperCamelCase ) ) as con:
snake_case_ : Tuple = con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
con.commit()
return path
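# Read-back sketch (not one of the fixtures; assumes `path` points at the
# SQLite file created above):
#
#     with contextlib.closing(sqlite3.connect(path)) as con:
#         rows = con.execute("SELECT col_1, col_2, col_3 FROM dataset").fetchall()
#         assert rows[0] == ("0", 0, 0.0)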
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(__UpperCamelCase , """w""" , newline="""""" ) as f:
snake_case_ : Optional[Any] = csv.DictWriter(__UpperCamelCase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(__UpperCamelCase , """w""" , newline="""""" ) as f:
snake_case_ : str = csv.DictWriter(__UpperCamelCase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : int ):
'''simple docstring'''
    import bz2
snake_case_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(__UpperCamelCase , """rb""" ) as f:
snake_case_ : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(__UpperCamelCase , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Any = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
snake_case_ : Any = pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(__UpperCamelCase , """wb""" ) as f:
snake_case_ : Optional[int] = pq.ParquetWriter(__UpperCamelCase , schema=__UpperCamelCase )
snake_case_ : Optional[int] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__UpperCamelCase ) )] for k in DATA[0]} , schema=__UpperCamelCase )
writer.write_table(__UpperCamelCase )
writer.close()
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
snake_case_ : Any = {"""data""": DATA}
with open(__UpperCamelCase , """w""" ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
snake_case_ : List[Any] = {"""data""": DATA_DICT_OF_LISTS}
with open(__UpperCamelCase , """w""" ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
import gzip
snake_case_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(__UpperCamelCase , """rb""" ) as orig_file:
with gzip.open(__UpperCamelCase , """wb""" ) as zipped_file:
zipped_file.writelines(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[str] ):
'''simple docstring'''
import gzip
snake_case_ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(__UpperCamelCase , """rb""" ) as orig_file:
with gzip.open(__UpperCamelCase , """wb""" ) as zipped_file:
zipped_file.writelines(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : List[str] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""nested""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(__UpperCamelCase , arcname=os.path.join("""nested""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : str = ["""0""", """1""", """2""", """3"""]
snake_case_ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : int = ["""0""", """1""", """2""", """3"""]
snake_case_ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : List[Any] = ["""0""", """1""", """2""", """3"""]
snake_case_ : str = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(__UpperCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(__UpperCamelCase , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : List[Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
snake_case_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
return data_dir
| 21 | 0 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
if number < 0 or shift_amount < 0:
raise ValueError("""both inputs must be positive integers""" )
snake_case_ : Union[str, Any] = str(bin(__UpperCamelCase ) )
binary_number += "0" * shift_amount
return binary_number
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
if number < 0 or shift_amount < 0:
raise ValueError("""both inputs must be positive integers""" )
snake_case_ : List[Any] = str(bin(__UpperCamelCase ) )[2:]
if shift_amount >= len(__UpperCamelCase ):
return "0b0"
snake_case_ : Dict = binary_number[: len(__UpperCamelCase ) - shift_amount]
return "0b" + shifted_binary_number
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
if number >= 0: # Get binary representation of positive number
snake_case_ : List[str] = """0""" + str(bin(__UpperCamelCase ) ).strip("""-""" )[2:]
else: # Get binary (2's complement) representation of negative number
snake_case_ : Optional[Any] = len(bin(__UpperCamelCase )[3:] ) # Find 2's complement of number
snake_case_ : List[str] = bin(abs(__UpperCamelCase ) - (1 << binary_number_length) )[3:]
snake_case_ : str = (
"""1""" + """0""" * (binary_number_length - len(__UpperCamelCase )) + binary_number
)
if shift_amount >= len(__UpperCamelCase ):
return "0b" + binary_number[0] * len(__UpperCamelCase )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(__UpperCamelCase ) - shift_amount]
)
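# Hand-checked illustrations of the three shifts defined above (all three share
# the obfuscated name __lowerCAmelCase, so the calls below name them by role;
# those names are hypothetical):
#
#     logical_left_shift(5, 2)        # "0b101" gains two zeros -> "0b10100"
#     logical_right_shift(20, 2)      # "10100" drops two bits  -> "0b101"
#     arithmetic_right_shift(-4, 1)   # sign bit replicated     -> "0b1110"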
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError("""the value of both inputs must be positive""" )
snake_case_ : List[str] = str(bin(__UpperCamelCase ) )[2:] # remove the leading "0b"
snake_case_ : Union[str, Any] = str(bin(__UpperCamelCase ) )[2:] # remove the leading "0b"
snake_case_ : int = max(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(__UpperCamelCase ) , b_binary.zfill(__UpperCamelCase ) ) )
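# Example, checked by hand: 25 = 0b11001 and 30 = 0b11110, so after zero-filling
# both operands to the same width the function returns "0b00111".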
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowerCAmelCase : List[str] = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Tuple = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[str] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
__lowerCAmelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
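# Usage note (a sketch, not part of the original module): with this layout,
# `import transformers.models.xlm` stays cheap because sys.modules[__name__] is
# replaced by the _LazyModule above, and heavy torch/TF symbols such as XLMModel
# are only imported on first attribute access.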
| 700 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCAmelCase : Tuple = {
'''vocab_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
},
'''merges_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
},
}
__lowerCAmelCase : Dict = {'''allegro/herbert-base-cased''': 514}
__lowerCAmelCase : Tuple = {}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = HerbertTokenizer
def __init__( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase="<s>" , _lowercase="<unk>" , _lowercase="<pad>" , _lowercase="<mask>" , _lowercase="</s>" , **_lowercase , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
_lowercase , _lowercase , tokenizer_file=_lowercase , cls_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , sep_token=_lowercase , **_lowercase , )
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]:
'''simple docstring'''
snake_case_ : str = [self.cls_token_id]
snake_case_ : Dict = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None , _lowercase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
if token_ids_a is None:
return [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1]
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]:
'''simple docstring'''
snake_case_ : str = [self.sep_token_id]
snake_case_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> Tuple[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
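# Minimal usage sketch (assumes Hub access; the class carries the obfuscated
# name _lowerCAmelCase defined above):
#
#     tokenizer = _lowerCAmelCase.from_pretrained("allegro/herbert-base-cased")
#     tokenizer.build_inputs_with_special_tokens([5, 6])  # -> [cls_id, 5, 6, sep_id]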
| 701 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''nat'''
_lowerCamelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _lowercase=4 , _lowercase=3 , _lowercase=6_4 , _lowercase=[3, 4, 6, 5] , _lowercase=[2, 4, 8, 1_6] , _lowercase=7 , _lowercase=3.0 , _lowercase=True , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.1 , _lowercase="gelu" , _lowercase=0.02 , _lowercase=1E-5 , _lowercase=0.0 , _lowercase=None , _lowercase=None , **_lowercase , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : Any = patch_size
snake_case_ : Optional[Any] = num_channels
snake_case_ : Optional[Any] = embed_dim
snake_case_ : Tuple = depths
snake_case_ : int = len(_lowercase )
snake_case_ : Optional[int] = num_heads
snake_case_ : List[str] = kernel_size
snake_case_ : str = mlp_ratio
snake_case_ : str = qkv_bias
snake_case_ : str = hidden_dropout_prob
snake_case_ : Tuple = attention_probs_dropout_prob
snake_case_ : Tuple = drop_path_rate
snake_case_ : Dict = hidden_act
snake_case_ : Union[str, Any] = layer_norm_eps
snake_case_ : Tuple = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case_ : Union[str, Any] = int(embed_dim * 2 ** (len(_lowercase ) - 1) )
snake_case_ : Union[str, Any] = layer_scale_init_value
snake_case_ : Optional[Any] = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(_lowercase ) + 1 )]
snake_case_ , snake_case_ : Any = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names )
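# Instantiation sketch: with the defaults above (embed_dim=64 and a four-stage
# depths list), the derived hidden_size is int(64 * 2 ** 3) = 512.
#
#     config = _lowerCAmelCase()
#     assert config.hidden_size == 512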
| 21 | 0 |
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def __lowerCAmelCase ( *__UpperCamelCase : Optional[Any] ):
'''simple docstring'''
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
snake_case_ : List[Any] = list(__UpperCamelCase )
for i in range(len(__UpperCamelCase ) ):
snake_case_ : Dict = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def __lowerCAmelCase ( __UpperCamelCase : Exception ):
'''simple docstring'''
snake_case_ : List[str] = [
"""CUDA out of memory.""", # CUDA OOM
"""cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU
"""DefaultCPUAllocator: can't allocate memory""", # CPU OOM
]
if isinstance(__UpperCamelCase , __UpperCamelCase ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def __lowerCAmelCase ( __UpperCamelCase : callable = None , __UpperCamelCase : int = 1_2_8 ):
'''simple docstring'''
if function is None:
return functools.partial(__UpperCamelCase , starting_batch_size=__UpperCamelCase )
snake_case_ : List[str] = starting_batch_size
def decorator(*__UpperCamelCase : Dict , **__UpperCamelCase : Optional[Any] ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
snake_case_ : Tuple = list(inspect.signature(__UpperCamelCase ).parameters.keys() )
# Guard against user error
if len(__UpperCamelCase ) < (len(__UpperCamelCase ) + 1):
snake_case_ : str = """, """.join([F'{arg}={value}' for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
F'Batch size was passed into `{function.__name__}` as the first argument when called.'
F'Remove this as the decorator already does so: `{function.__name__}({arg_str})`' )
while True:
if batch_size == 0:
raise RuntimeError("""No executable batch size found, reached zero.""" )
try:
return function(__UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
except Exception as e:
if should_reduce_batch_size(__UpperCamelCase ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
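# Usage sketch, mirroring accelerate's find_executable_batch_size API (the loop
# body is a placeholder): the wrapped function must take batch_size as its first
# argument; any OOM-like failure halves the batch size and retries.
#
#     @__lowerCAmelCase(starting_batch_size=128)
#     def training_loop(batch_size, *args):
#         ...  # build dataloaders/model sized by `batch_size`
#
#     training_loop()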
| 702 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
__lowerCAmelCase : Optional[Any] = False
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
return 1_2
@property
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return 1_2
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return 3_2
@property
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : List[Any] = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(_lowercase )
@property
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = 1_2
snake_case_ : Tuple = 1_2
snake_case_ : Tuple = {
"""attention_bias""": True,
"""cross_attention_dim""": 3_2,
"""attention_head_dim""": height * width,
"""num_attention_heads""": 1,
"""num_vector_embeds""": self.num_embed,
"""num_embeds_ada_norm""": self.num_embeds_ada_norm,
"""norm_num_groups""": 3_2,
"""sample_size""": width,
"""activation_fn""": """geglu-approximate""",
}
        snake_case_ : Optional[Any] = Transformer2DModel(**_lowercase )
return model
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = """cpu"""
snake_case_ : List[str] = self.dummy_vqvae
snake_case_ : Any = self.dummy_text_encoder
snake_case_ : Tuple = self.dummy_tokenizer
snake_case_ : int = self.dummy_transformer
snake_case_ : int = VQDiffusionScheduler(self.num_embed )
snake_case_ : Dict = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowercase )
snake_case_ : Optional[Any] = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
snake_case_ : int = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : List[Any] = """teddy bear playing in the pool"""
snake_case_ : Dict = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : List[Any] = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="""np""" )
snake_case_ : Optional[int] = output.images
snake_case_ : List[Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Dict = pipe(
[prompt] , generator=_lowercase , output_type="""np""" , return_dict=_lowercase , num_inference_steps=2 )[0]
snake_case_ : List[Any] = image[0, -3:, -3:, -1]
snake_case_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
snake_case_ : Dict = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : int = """cpu"""
snake_case_ : List[Any] = self.dummy_vqvae
snake_case_ : Optional[int] = self.dummy_text_encoder
snake_case_ : List[Any] = self.dummy_tokenizer
snake_case_ : Union[str, Any] = self.dummy_transformer
snake_case_ : str = VQDiffusionScheduler(self.num_embed )
snake_case_ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(
learnable=_lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
snake_case_ : Union[str, Any] = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
snake_case_ : Any = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : Tuple = """teddy bear playing in the pool"""
snake_case_ : str = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Tuple = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="""np""" )
snake_case_ : Dict = output.images
snake_case_ : Union[str, Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Any = pipe(
[prompt] , generator=_lowercase , output_type="""np""" , return_dict=_lowercase , num_inference_steps=2 )[0]
snake_case_ : Optional[Any] = image[0, -3:, -3:, -1]
snake_case_ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
snake_case_ : int = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" )
snake_case_ : str = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" )
snake_case_ : Optional[Any] = pipeline.to(_lowercase )
pipeline.set_progress_bar_config(disable=_lowercase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case_ : Any = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : Optional[int] = pipeline(
"""teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=_lowercase , output_type="""np""" , )
snake_case_ : Union[str, Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 21 | 0 |
"""simple docstring"""
import operator
def __lowerCAmelCase ( __UpperCamelCase : list , __UpperCamelCase : bool = False , __UpperCamelCase : list | None = None ):
'''simple docstring'''
snake_case_ : int = operator.lt if reverse else operator.gt
snake_case_ : Tuple = solution or []
if not arr:
return solution
snake_case_ : Optional[Any] = [arr.pop(0 )]
for i, item in enumerate(__UpperCamelCase ):
if _operator(__UpperCamelCase , sublist[-1] ):
sublist.append(__UpperCamelCase )
arr.pop(__UpperCamelCase )
# merging sublist into solution list
if not solution:
solution.extend(__UpperCamelCase )
else:
while sublist:
snake_case_ : Tuple = sublist.pop(0 )
for i, xx in enumerate(__UpperCamelCase ):
if not _operator(__UpperCamelCase , __UpperCamelCase ):
solution.insert(__UpperCamelCase , __UpperCamelCase )
break
else:
solution.append(__UpperCamelCase )
strand_sort(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return solution
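# One pass on [4, 3, 5, 1, 2]: the strand pulled out is [4, 5] (each element
# strictly greater than the strand's tail), leaving [3, 1, 2]; the strand is
# merged into `solution` and the function recurses on the remainder.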
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 703 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Any=0 ):
'''simple docstring'''
if name is None:
snake_case_ : Dict = None
else:
snake_case_ : Dict = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(5_0 - spaces ) + """s}"""
snake_case_ : Any = fmt.format(__UpperCamelCase )
# Print and recurse (if needed).
if isinstance(__UpperCamelCase , __UpperCamelCase ):
if msg is not None:
print(__UpperCamelCase )
for k in val.keys():
recursive_print(__UpperCamelCase , val[k] , spaces + 2 )
elif isinstance(__UpperCamelCase , torch.Tensor ):
print(__UpperCamelCase , """:""" , val.size() )
else:
print(__UpperCamelCase , """:""" , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : Any = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
snake_case_ : List[str] = (num_heads, hidden_size, num_splits) + input_shape[1:]
snake_case_ : Tuple = param.view(*__UpperCamelCase )
snake_case_ : Tuple = param.transpose(0 , 2 )
snake_case_ : Any = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
snake_case_ : Optional[Any] = (num_heads, num_splits, hidden_size) + input_shape[1:]
snake_case_ : str = param.view(*__UpperCamelCase )
snake_case_ : Dict = param.transpose(0 , 1 ).contiguous()
snake_case_ : int = param.view(*__UpperCamelCase )
return param
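# Shape walk-through with hypothetical sizes (num_heads=16, hidden_size=64,
# num_splits=3 for query/key/value): a checkpoint-2.0 weight of shape
# [16 * 3 * 64, D] is viewed as [16, 3, 64, D], the first two axes are swapped
# to [3, 16, 64, D], and the result is flattened back to [3 * 16 * 64, D],
# the [num_splits * num_heads * hidden_size, :] ordering transformers expects.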
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Dict = {}
# old versions did not store training args
snake_case_ : List[str] = input_state_dict.get("""args""" , __UpperCamelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
snake_case_ : Tuple = ds_args.padded_vocab_size
snake_case_ : Optional[int] = ds_args.max_position_embeddings
snake_case_ : Union[str, Any] = ds_args.hidden_size
snake_case_ : Union[str, Any] = ds_args.num_layers
snake_case_ : str = ds_args.num_attention_heads
snake_case_ : str = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
snake_case_ : Union[str, Any] = config.n_head
# The hidden_size per head.
snake_case_ : Optional[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
snake_case_ : Optional[Any] = input_state_dict["""checkpoint_version"""]
else:
snake_case_ : int = 0.0
# The model.
snake_case_ : List[str] = input_state_dict["""model"""]
# The language model.
snake_case_ : str = model["""language_model"""]
# The embeddings.
snake_case_ : Tuple = lm["""embedding"""]
# The word embeddings.
snake_case_ : List[str] = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
snake_case_ : Optional[int] = word_embeddings[: config.vocab_size, :]
snake_case_ : Optional[int] = word_embeddings
# The position embeddings.
snake_case_ : List[Any] = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
snake_case_ : Tuple = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match' )
# Store the position embeddings.
snake_case_ : Union[str, Any] = pos_embeddings
# The transformer.
snake_case_ : Optional[Any] = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
snake_case_ : Optional[Any] = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
snake_case_ : List[str] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
snake_case_ : int = layer_re.match(__UpperCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
snake_case_ : Tuple = int(m.group(1 ) )
# The name of the operation.
snake_case_ : Any = m.group(2 )
# Is it a weight or a bias?
snake_case_ : Union[str, Any] = m.group(3 )
# The name of the layer.
snake_case_ : str = F'transformer.h.{layer_idx}'
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
snake_case_ : Dict = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
snake_case_ : Optional[int] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
            snake_case_ : Optional[Any] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.float16 ) ).view(
1 , 1 , __UpperCamelCase , __UpperCamelCase )
snake_case_ : List[Any] = causal_mask
# Insert a "dummy" tensor for masked_bias.
            snake_case_ : str = torch.tensor(-1E4 , dtype=torch.float16 )
snake_case_ : List[Any] = masked_bias
snake_case_ : Optional[int] = fix_query_key_value_ordering(__UpperCamelCase , __UpperCamelCase , 3 , __UpperCamelCase , __UpperCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
snake_case_ : str = out_val.transpose(0 , 1 ).contiguous()
# Store.
snake_case_ : Tuple = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
snake_case_ : Optional[Any] = fix_query_key_value_ordering(__UpperCamelCase , __UpperCamelCase , 3 , __UpperCamelCase , __UpperCamelCase )
# Store. No change of shape.
snake_case_ : List[Any] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
snake_case_ : Any = megatron_to_transformers[op_name]
snake_case_ : str = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
snake_case_ : List[str] = megatron_to_transformers[op_name]
snake_case_ : Tuple = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
snake_case_ : Dict = transformer["""final_layernorm.weight"""]
snake_case_ : Dict = transformer["""final_layernorm.bias"""]
# For LM head, transformers' wants the matrix to weight embeddings.
snake_case_ : Optional[int] = word_embeddings
# It should be done!
return output_state_dict
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[str] = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" , type=__UpperCamelCase , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
parser.add_argument(
"""--config_file""" , default="""""" , type=__UpperCamelCase , help="""An optional config json file describing the pre-trained model.""" , )
snake_case_ : str = parser.parse_args()
# Extract the basename.
snake_case_ : Optional[Any] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F'Extracting PyTorch state dictionary from {args.path_to_checkpoint}' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
snake_case_ : Optional[int] = torch.load(__UpperCamelCase , map_location="""cpu""" )
else:
snake_case_ : List[Any] = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
snake_case_ : Any = input_state_dict.get("""args""" , __UpperCamelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
snake_case_ : Any = """gelu_fast"""
elif ds_args.openai_gelu:
snake_case_ : Tuple = """gelu_new"""
else:
snake_case_ : List[str] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
snake_case_ : Dict = """gelu_new"""
# Spell out all parameters in case the defaults change.
        snake_case_ : List[str] = GPT2Config(
vocab_size=5_0_2_5_7 , n_positions=1_0_2_4 , n_embd=1_0_2_4 , n_layer=2_4 , n_head=1_6 , n_inner=4_0_9_6 , activation_function=__UpperCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=__UpperCamelCase , summary_activation=__UpperCamelCase , summary_proj_to_labels=__UpperCamelCase , summary_first_dropout=0.1 , scale_attn_weights=__UpperCamelCase , use_cache=__UpperCamelCase , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , )
else:
        snake_case_ : List[Any] = GPT2Config.from_json_file(args.config_file )
snake_case_ : int = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
snake_case_ : Tuple = convert_megatron_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(__UpperCamelCase , __UpperCamelCase )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
snake_case_ : str = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
snake_case_ : Optional[Any] = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
snake_case_ : str = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'Unrecognized tokenizer_type {tokenizer_type}' )
else:
snake_case_ : List[str] = """gpt2"""
snake_case_ : List[Any] = AutoTokenizer.from_pretrained(__UpperCamelCase )
snake_case_ : List[str] = type(__UpperCamelCase ).__name__
snake_case_ : Optional[int] = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(__UpperCamelCase )
# Save tokenizer based on args
print(F'Adding {tokenizer_class} tokenizer files' )
tokenizer.save_pretrained(__UpperCamelCase )
# Store the state_dict to file.
snake_case_ : List[Any] = os.path.join(__UpperCamelCase , """pytorch_model.bin""" )
print(F'Saving checkpoint to "{output_checkpoint_file}"' )
torch.save(__UpperCamelCase , __UpperCamelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 21 | 0 |
from __future__ import annotations
def __lowerCAmelCase ( __UpperCamelCase : list , __UpperCamelCase : int | None = None , __UpperCamelCase : int | None = None ):
'''simple docstring'''
if start is None:
snake_case_ : int = 0
if end is None:
snake_case_ : int = len(__UpperCamelCase ) - 1
if start >= end:
return
snake_case_ : str = (start + end) // 2
slowsort(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
slowsort(__UpperCamelCase , mid + 1 , __UpperCamelCase )
if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]  # swap the out-of-order pair
slowsort(__UpperCamelCase , __UpperCamelCase , end - 1 )
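# Example: slowsort([3, 1, 2]) recursively "sorts" both halves, swaps the middle
# and last elements when out of order, then re-sorts everything but the last
# element. It is a deliberately inefficient "multiply and surrender" algorithm,
# useful only as a contrast to practical sorts.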
if __name__ == "__main__":
from doctest import testmod
testmod()
| 704 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : int = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : int = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : int = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : List[Any] = tf.cast(math.pi , x.dtype )
snake_case_ : int = tf.cast(0.044_715 , x.dtype )
snake_case_ : Optional[int] = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(__UpperCamelCase , 3 )) ))
return x * cdf
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : Optional[Any] = tf.convert_to_tensor(__UpperCamelCase )
return x * tf.tanh(tf.math.softplus(__UpperCamelCase ) )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : int = tf.cast(0.044_715 , x.dtype )
snake_case_ : Optional[int] = tf.cast(0.7_978_845_608 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : Tuple = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : str = tf.cast(1.702 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return tf.clip_by_value(_gelu(__UpperCamelCase ) , -1_0 , 1_0 )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str]=-1 ):
'''simple docstring'''
snake_case_ , snake_case_ : List[Any] = tf.split(__UpperCamelCase , 2 , axis=__UpperCamelCase )
return a * tf.math.sigmoid(__UpperCamelCase )
if version.parse(tf.version.VERSION) >= version.parse('''2.4'''):
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
return tf.keras.activations.gelu(__UpperCamelCase , approximate=__UpperCamelCase )
__lowerCAmelCase : int = tf.keras.activations.gelu
__lowerCAmelCase : Optional[Any] = approximate_gelu_wrap
else:
__lowerCAmelCase : List[Any] = _gelu
__lowerCAmelCase : Any = _gelu_new
__lowerCAmelCase : Dict = {
'''gelu''': gelu,
'''gelu_10''': gelu_aa,
'''gelu_fast''': gelu_fast,
'''gelu_new''': gelu_new,
'''glu''': glu,
'''mish''': mish,
'''quick_gelu''': quick_gelu,
'''relu''': tf.keras.activations.relu,
'''sigmoid''': tf.keras.activations.sigmoid,
'''silu''': tf.keras.activations.swish,
'''swish''': tf.keras.activations.swish,
'''tanh''': tf.keras.activations.tanh,
}
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F'function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}' )
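# Lookup example (assuming the mapping above is bound to the name ACTaFN that
# the getter references): ACTaFN["gelu"](tf.constant([1.0])) evaluates to
# roughly [0.8413], i.e. x * Phi(x) for x = 1.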
| 21 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__lowerCAmelCase : Tuple = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=7 , _lowercase=3 , _lowercase=1_8 , _lowercase=3_0 , _lowercase=4_0_0 , _lowercase=None , _lowercase=True , _lowercase=True , _lowercase=None , ) -> List[Any]:
'''simple docstring'''
snake_case_ : Dict = size if size is not None else {"""height""": 2_0, """width""": 2_0}
snake_case_ : List[str] = parent
snake_case_ : Dict = batch_size
snake_case_ : Any = num_channels
snake_case_ : Dict = image_size
snake_case_ : List[str] = min_resolution
snake_case_ : str = max_resolution
snake_case_ : List[Any] = size
snake_case_ : Optional[int] = do_normalize
snake_case_ : Optional[Any] = do_convert_rgb
snake_case_ : Optional[int] = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6]
snake_case_ : List[str] = patch_size if patch_size is not None else {"""height""": 1_6, """width""": 1_6}
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = """https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"""
snake_case_ : int = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ).convert("""RGB""" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = PixaStructImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = PixaStructImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowercase , """do_convert_rgb""" ) )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : str = self.image_processor_tester.prepare_dummy_image()
snake_case_ : str = self.image_processing_class(**self.image_processor_dict )
snake_case_ : Dict = 2_0_4_8
snake_case_ : Any = image_processor(_lowercase , return_tensors="""pt""" , max_patches=_lowercase )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1E-3 , rtol=1E-3 ) )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
snake_case_ : Union[str, Any] = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
snake_case_ : Any = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
snake_case_ : Optional[int] = image_processor(
_lowercase , return_tensors="""pt""" , max_patches=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
snake_case_ : str = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
snake_case_ : Dict = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(_lowercase ):
snake_case_ : Dict = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=_lowercase ).flattened_patches
snake_case_ : Dict = """Hello"""
snake_case_ : Tuple = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=_lowercase , header_text=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
snake_case_ : Tuple = image_processor(
_lowercase , return_tensors="""pt""" , max_patches=_lowercase , header_text=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
snake_case_ : int = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
snake_case_ : Tuple = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
snake_case_ : Tuple = image_processor(
_lowercase , return_tensors="""pt""" , max_patches=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
snake_case_ : List[str] = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
snake_case_ : Union[str, Any] = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
snake_case_ : Optional[Any] = image_processor(
_lowercase , return_tensors="""pt""" , max_patches=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = PixaStructImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = PixaStructImageProcessingTester(self , num_channels=4 )
snake_case_ : Tuple = 3
@property
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowercase , """do_convert_rgb""" ) )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
snake_case_ : int = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
snake_case_ : Dict = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
snake_case_ : Any = image_processor(
_lowercase , return_tensors="""pt""" , max_patches=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
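# --- Added sanity check (standalone; not from the original test file): the
# flattened-patch width asserted throughout these tests is
# channels * patch_height * patch_width plus two row/column index slots.
_num_channels, _patch_h, _patch_w = 3, 16, 16  # the tester defaults used above
assert (_patch_h * _patch_w) * _num_channels + 2 == 770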
| 705 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : int = [0] * len(__UpperCamelCase )
snake_case_ : List[str] = []
snake_case_ : Any = [1] * len(__UpperCamelCase )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__UpperCamelCase ) ):
if indegree[i] == 0:
            queue.append(i )
while queue:
snake_case_ : Optional[int] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
snake_case_ : Union[str, Any] = long_dist[vertex] + 1
if indegree[x] == 0:
                queue.append(x )
    print(max(long_dist ) )
# Adjacency list of Graph
__lowerCAmelCase : str = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
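# --- Added reference variant (hedged; the helper name and locals are mine):
# the same Kahn-style relaxation as the routine above, returning the value
# instead of printing it. On the sample graph the longest chain
# 0 -> 2 -> 5 -> 6 -> 7 covers five vertices.
def longest_distance_value(dag):
    indegree = {v: 0 for v in dag}
    for targets in dag.values():
        for t in targets:
            indegree[t] += 1
    dist = {v: 1 for v in dag}  # every vertex is a chain of length 1 by itself
    queue = [v for v in dag if indegree[v] == 0]
    while queue:
        vertex = queue.pop(0)
        for nxt in dag[vertex]:
            indegree[nxt] -= 1
            dist[nxt] = max(dist[nxt], dist[vertex] + 1)
            if indegree[nxt] == 0:
                queue.append(nxt)
    return max(dist.values())

assert longest_distance_value(graph) == 5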
| 21 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''upernet'''
def __init__( self , _lowercase=None , _lowercase=5_1_2 , _lowercase=0.02 , _lowercase=[1, 2, 3, 6] , _lowercase=True , _lowercase=0.4 , _lowercase=3_8_4 , _lowercase=2_5_6 , _lowercase=1 , _lowercase=False , _lowercase=2_5_5 , **_lowercase , ) -> List[Any]:
'''simple docstring'''
super().__init__(**_lowercase )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
snake_case_ : Optional[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(_lowercase , _lowercase ):
snake_case_ : List[Any] = backbone_config.get("""model_type""" )
snake_case_ : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
snake_case_ : Any = config_class.from_dict(_lowercase )
snake_case_ : str = backbone_config
snake_case_ : Union[str, Any] = hidden_size
snake_case_ : Dict = initializer_range
snake_case_ : Union[str, Any] = pool_scales
snake_case_ : Optional[Any] = use_auxiliary_head
snake_case_ : Any = auxiliary_loss_weight
snake_case_ : List[Any] = auxiliary_in_channels
snake_case_ : str = auxiliary_channels
snake_case_ : Dict = auxiliary_num_convs
snake_case_ : List[Any] = auxiliary_concat_input
snake_case_ : int = loss_ignore_index
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = copy.deepcopy(self.__dict__ )
snake_case_ : Optional[int] = self.backbone_config.to_dict()
snake_case_ : Dict = self.__class__.model_type
return output
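# --- Added standalone sketch (hypothetical tiny classes, not the real configs):
# the serialization pattern used by the method above: deep-copy the instance
# dict, swap the nested backbone config object for its own dict, and record
# the class-level model type.
class _TinyBackbone:
    def to_dict(self):
        return {"model_type": "resnet"}

class _TinyConfig:
    model_type = "upernet"

    def __init__(self):
        self.backbone_config = _TinyBackbone()
        self.pool_scales = [1, 2, 3, 6]

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output

# _TinyConfig().to_dict() ->
# {'backbone_config': {'model_type': 'resnet'}, 'pool_scales': [1, 2, 3, 6], 'model_type': 'upernet'}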
| 706 |
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
    snake_case_ : Any = 1  # to keep the calculated value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
snake_case_ : Optional[int] = n - k
# Calculate C(n,k)
    for i in range(k ):
result *= n - i
result //= i + 1
return result
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
    return binomial_coefficient(2 * node_count , node_count ) // (node_count + 1)
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
if n < 0:
raise ValueError("""factorial() not defined for negative values""" )
snake_case_ : Optional[int] = 1
for i in range(1 , n + 1 ):
result *= i
return result
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return catalan_number(__UpperCamelCase ) * factorial(__UpperCamelCase )
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] = int(input('''Enter the number of nodes: ''').strip() or 0)
if node_count <= 0:
raise ValueError('''We need some nodes to work with.''')
print(
F'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
F'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
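# --- Added cross-check (standalone, standard library only): the closed forms
# behind the routines above are catalan(n) = C(2n, n) // (n + 1) binary search
# trees, and catalan(n) * n! labeled binary trees.
from math import comb as _comb, factorial as _factorial

for _n in range(1, 6):
    _catalan = _comb(2 * _n, _n) // (_n + 1)
    print(_n, _catalan, _catalan * _factorial(_n))  # e.g. n=3 -> 5 BSTs, 30 binary trees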
| 21 | 0 |
"""simple docstring"""
from __future__ import annotations
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
if b == 0:
return (1, 0)
    (snake_case_ , snake_case_) : Union[str, Any] = extended_euclid(__UpperCamelCase , a % b )
snake_case_ : Optional[int] = a // b
return (y, x - k * y)
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
    (snake_case_ , snake_case_) : List[str] = extended_euclid(__UpperCamelCase , __UpperCamelCase )
snake_case_ : Union[str, Any] = na * na
snake_case_ : Union[str, Any] = ra * x * na + ra * y * na
return (n % m + m) % m
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
    (snake_case_ , snake_case_) : List[str] = extended_euclid(__UpperCamelCase , __UpperCamelCase )
if b < 0:
snake_case_ : Dict = (b % n + n) % n
return b
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
    snake_case_ , snake_case_ : Optional[int] = invert_modulo(__UpperCamelCase , __UpperCamelCase ), invert_modulo(__UpperCamelCase , __UpperCamelCase )
snake_case_ : Union[str, Any] = na * na
snake_case_ : Any = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
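# --- Added brute-force cross-check (the helper name is mine): for coprime
# moduli, x = r1 (mod n1), x = r2 (mod n2) has exactly one solution in
# [0, n1 * n2), which is what both CRT variants above are meant to return.
def _crt_bruteforce(n1, r1, n2, r2):
    return next(x for x in range(n1 * n2) if x % n1 == r1 and x % n2 == r2)

assert _crt_bruteforce(3, 2, 5, 3) == 8  # 8 % 3 == 2 and 8 % 5 == 3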
| 707 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : Dict = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''swin'''
_lowerCamelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _lowercase=2_2_4 , _lowercase=4 , _lowercase=3 , _lowercase=9_6 , _lowercase=[2, 2, 6, 2] , _lowercase=[3, 6, 1_2, 2_4] , _lowercase=7 , _lowercase=4.0 , _lowercase=True , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.1 , _lowercase="gelu" , _lowercase=False , _lowercase=0.02 , _lowercase=1E-5 , _lowercase=3_2 , _lowercase=None , _lowercase=None , **_lowercase , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : str = image_size
snake_case_ : int = patch_size
snake_case_ : Optional[int] = num_channels
snake_case_ : Union[str, Any] = embed_dim
snake_case_ : Optional[int] = depths
snake_case_ : Optional[int] = len(_lowercase )
snake_case_ : Optional[Any] = num_heads
snake_case_ : Optional[Any] = window_size
snake_case_ : Optional[Any] = mlp_ratio
snake_case_ : Optional[Any] = qkv_bias
snake_case_ : Optional[Any] = hidden_dropout_prob
snake_case_ : Tuple = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = drop_path_rate
snake_case_ : List[Any] = hidden_act
snake_case_ : str = use_absolute_embeddings
snake_case_ : str = layer_norm_eps
snake_case_ : Optional[Any] = initializer_range
snake_case_ : Any = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case_ : Tuple = int(embed_dim * 2 ** (len(_lowercase ) - 1) )
snake_case_ : Tuple = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(_lowercase ) + 1 )]
snake_case_ , snake_case_ : Any = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = version.parse('''1.11''' )
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCAmelCase__ ( self ) -> float:
'''simple docstring'''
return 1E-4
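# --- Added standalone illustration (values mirror the constructor defaults
# above): the channel width after the last Swin stage doubles once per stage,
# which is what gets stored as hidden_size, alongside the generated stage names.
_embed_dim, _depths = 96, [2, 2, 6, 2]
assert int(_embed_dim * 2 ** (len(_depths) - 1)) == 768
assert ["stem"] + [f"stage{i}" for i in range(1, len(_depths) + 1)] == [
    "stem", "stage1", "stage2", "stage3", "stage4",
]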
| 21 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
__lowerCAmelCase : Union[str, Any] = None
__lowerCAmelCase : Dict = logging.get_logger(__name__)
__lowerCAmelCase : int = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCAmelCase : Tuple = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
__lowerCAmelCase : Optional[int] = {
'''facebook/nllb-large-en-ro''': 1024,
'''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
__lowerCAmelCase : Optional[int] = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
_lowerCamelCase = NllbTokenizer
_lowerCamelCase = []
_lowerCamelCase = []
def __init__( self , _lowercase=None , _lowercase=None , _lowercase="<s>" , _lowercase="</s>" , _lowercase="</s>" , _lowercase="<s>" , _lowercase="<unk>" , _lowercase="<pad>" , _lowercase="<mask>" , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=False , **_lowercase , ) -> Any:
'''simple docstring'''
snake_case_ : str = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
snake_case_ : int = legacy_behaviour
super().__init__(
vocab_file=_lowercase , tokenizer_file=_lowercase , bos_token=_lowercase , eos_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , src_lang=_lowercase , tgt_lang=_lowercase , additional_special_tokens=_lowercase , legacy_behaviour=_lowercase , **_lowercase , )
snake_case_ : Tuple = vocab_file
snake_case_ : Dict = False if not self.vocab_file else True
snake_case_ : List[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
snake_case_ : Any = {
lang_code: self.convert_tokens_to_ids(_lowercase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
snake_case_ : Any = src_lang if src_lang is not None else """eng_Latn"""
snake_case_ : int = self.convert_tokens_to_ids(self._src_lang )
snake_case_ : Optional[int] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def UpperCAmelCase__ ( self , _lowercase ) -> None:
'''simple docstring'''
snake_case_ : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]:
'''simple docstring'''
snake_case_ : List[str] = [self.sep_token_id]
snake_case_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , **_lowercase ) -> Dict:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
snake_case_ : Optional[int] = src_lang
snake_case_ : int = self(_lowercase , add_special_tokens=_lowercase , return_tensors=_lowercase , **_lowercase )
snake_case_ : List[Any] = self.convert_tokens_to_ids(_lowercase )
snake_case_ : str = tgt_lang_id
return inputs
def UpperCAmelCase__ ( self , _lowercase , _lowercase = "eng_Latn" , _lowercase = None , _lowercase = "fra_Latn" , **_lowercase , ) -> BatchEncoding:
'''simple docstring'''
snake_case_ : Any = src_lang
snake_case_ : List[str] = tgt_lang
return super().prepare_seqaseq_batch(_lowercase , _lowercase , **_lowercase )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCAmelCase__ ( self , _lowercase ) -> None:
'''simple docstring'''
snake_case_ : Optional[Any] = self.convert_tokens_to_ids(_lowercase )
if self.legacy_behaviour:
snake_case_ : List[str] = []
snake_case_ : List[Any] = [self.eos_token_id, self.cur_lang_code]
else:
snake_case_ : List[str] = [self.cur_lang_code]
snake_case_ : Union[str, Any] = [self.eos_token_id]
snake_case_ : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
snake_case_ : Any = self.convert_ids_to_tokens(self.suffix_tokens )
snake_case_ : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCAmelCase__ ( self , _lowercase ) -> None:
'''simple docstring'''
snake_case_ : Dict = self.convert_tokens_to_ids(_lowercase )
if self.legacy_behaviour:
snake_case_ : Tuple = []
snake_case_ : Tuple = [self.eos_token_id, self.cur_lang_code]
else:
snake_case_ : List[str] = [self.cur_lang_code]
snake_case_ : int = [self.eos_token_id]
snake_case_ : Tuple = self.convert_ids_to_tokens(self.prefix_tokens )
snake_case_ : List[str] = self.convert_ids_to_tokens(self.suffix_tokens )
snake_case_ : Optional[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
return
snake_case_ : Any = os.path.join(
_lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
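# --- Added standalone illustration (hedged; the token ids are made up): the
# two special-token layouts handled by the set-language methods above. Legacy
# NLLB appended [eos, lang_code] after the text, while the current behaviour
# prefixes the language code and suffixes eos.
def _nllb_layout(token_ids, lang_code_id, eos_id, legacy_behaviour=False):
    if legacy_behaviour:
        return token_ids + [eos_id, lang_code_id]
    return [lang_code_id] + token_ids + [eos_id]

assert _nllb_layout([10, 11], lang_code_id=999, eos_id=2) == [999, 10, 11, 2]
assert _nllb_layout([10, 11], lang_code_id=999, eos_id=2, legacy_behaviour=True) == [10, 11, 2, 999]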
| 708 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : int = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" )
snake_case_ : List[Any] = AutoTokenizer.from_pretrained("""google/mt5-small""" )
snake_case_ : Dict = tokenizer("""Hello there""" , return_tensors="""np""" ).input_ids
snake_case_ : List[str] = tokenizer("""Hi I am""" , return_tensors="""np""" ).input_ids
snake_case_ : Optional[Any] = shift_tokens_right(_lowercase , model.config.pad_token_id , model.config.decoder_start_token_id )
snake_case_ : Tuple = model(_lowercase , decoder_input_ids=_lowercase ).logits
snake_case_ : Tuple = optax.softmax_cross_entropy(_lowercase , onehot(_lowercase , logits.shape[-1] ) ).mean()
snake_case_ : List[str] = -(labels.shape[-1] * loss.item())
snake_case_ : Optional[int] = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
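# --- Added arithmetic note (standalone): the score above rescales the mean
# per-token cross-entropy to a summed sequence log-likelihood. For example, a
# 4-token target with a mean loss of 21.228175 reproduces the expected score.
_seq_len, _mean_loss = 4, 21.228175
assert abs(-(_seq_len * _mean_loss) - (-84.9127)) < 1e-4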
| 21 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = AutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" , return_dict=_lowercase ).to(_lowercase )
snake_case_ : Any = AutoTokenizer.from_pretrained("""google/mt5-small""" )
snake_case_ : Dict = tokenizer("""Hello there""" , return_tensors="""pt""" ).input_ids
snake_case_ : str = tokenizer("""Hi I am""" , return_tensors="""pt""" ).input_ids
snake_case_ : int = model(input_ids.to(_lowercase ) , labels=labels.to(_lowercase ) ).loss
snake_case_ : Union[str, Any] = -(labels.shape[-1] * loss.item())
snake_case_ : List[Any] = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 709 |
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
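# --- Added usage sketch (hedged; assumes PyTorch is installed, the default
# return framework of default_data_collator): equal-length features collate
# straight into stacked tensors, and the "label" key is renamed to "labels".
if __name__ == "__main__":
    features = [{"input_ids": [1, 2, 3], "label": 0}, {"input_ids": [4, 5, 6], "label": 1}]
    batch = default_data_collator(features)
    assert tuple(batch["input_ids"].shape) == (2, 3)
    assert batch["labels"].tolist() == [0, 1]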
| 21 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCAmelCase : Optional[Any] = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
__lowerCAmelCase : Optional[int] = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : str ):
'''simple docstring'''
for attribute in key.split(""".""" ):
snake_case_ : Dict = getattr(__UpperCamelCase , __UpperCamelCase )
if weight_type is not None:
snake_case_ : Optional[Any] = getattr(__UpperCamelCase , __UpperCamelCase ).shape
else:
snake_case_ : Any = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
snake_case_ : Any = value
elif weight_type == "weight_g":
snake_case_ : Optional[int] = value
elif weight_type == "weight_v":
snake_case_ : Union[str, Any] = value
elif weight_type == "bias":
snake_case_ : Optional[int] = value
else:
snake_case_ : List[Any] = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : List[Any] = []
snake_case_ : Dict = fairseq_model.state_dict()
snake_case_ : Any = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
snake_case_ : Tuple = None
for name, value in fairseq_dict.items():
snake_case_ : List[str] = False
if "conv_layers" in name:
load_conv_layer(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , hf_model.config.feat_extract_norm == """group""" , )
snake_case_ : Dict = True
elif name.split(""".""" )[0] == "proj":
snake_case_ : List[Any] = fairseq_model.proj
snake_case_ : Tuple = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
snake_case_ : Union[str, Any] = True
if "*" in mapped_key:
                    snake_case_ : Union[str, Any] = name.split(key )[0].split(""".""" )[-2]
                    snake_case_ : Tuple = mapped_key.replace("""*""" , layer_index )
if "weight_g" in name:
snake_case_ : Dict = """weight_g"""
elif "weight_v" in name:
snake_case_ : List[str] = """weight_v"""
elif "bias" in name:
snake_case_ : List[str] = """bias"""
elif "weight" in name:
snake_case_ : List[str] = """weight"""
else:
snake_case_ : Tuple = None
set_recursively(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
continue
if not is_used:
unused_weights.append(__UpperCamelCase )
logger.warning(F'Unused weights: {unused_weights}' )
return proj_weight
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : Dict = full_name.split("""conv_layers.""" )[-1]
snake_case_ : str = name.split(""".""" )
snake_case_ : Any = int(items[0] )
snake_case_ : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
snake_case_ : Dict = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
snake_case_ : Tuple = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
snake_case_ : Optional[int] = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
snake_case_ : Dict = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : Dict = emb.weight.shape
snake_case_ : List[Any] = nn.Linear(__UpperCamelCase , __UpperCamelCase , bias=__UpperCamelCase )
snake_case_ : Dict = emb.weight.data
return lin_layer
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
with open(__UpperCamelCase , """r""" , encoding="""utf-8""" ) as f:
snake_case_ : Tuple = f.readlines()
snake_case_ : Optional[Any] = [line.split(""" """ )[0] for line in lines]
snake_case_ : Optional[int] = len(__UpperCamelCase )
snake_case_ : Union[str, Any] = {
"""<s>""": 0,
"""<pad>""": 1,
"""</s>""": 2,
"""<unk>""": 3,
}
vocab_dict.update(dict(zip(__UpperCamelCase , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : Any , __UpperCamelCase : Tuple , ):
'''simple docstring'''
snake_case_ : List[Any] = WavaVecaConfig.from_pretrained(__UpperCamelCase )
snake_case_ : Optional[Any] = SpeechaTextaConfig.from_pretrained(
__UpperCamelCase , vocab_size=__UpperCamelCase , decoder_layers=__UpperCamelCase , do_stable_layer_norm=__UpperCamelCase )
snake_case_ : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=__UpperCamelCase , return_attention_mask=__UpperCamelCase , )
snake_case_ : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
snake_case_ : Optional[Any] = model[0].eval()
# set weights for wav2vec2 encoder
snake_case_ : Any = WavaVecaModel(__UpperCamelCase )
snake_case_ : List[str] = recursively_load_weights_wavaveca(model.encoder , __UpperCamelCase )
snake_case_ : List[str] = SpeechaTextaForCausalLM(__UpperCamelCase )
snake_case_ : Optional[int] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__UpperCamelCase )
# set output linear layer
unexpected_keys.remove("""embed_out""" )
snake_case_ : int = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F'The following keys are missing when loading the decoder weights: {missing_keys}' )
logger.warning(F'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
snake_case_ : Any = SpeechEncoderDecoderModel(encoder=__UpperCamelCase , decoder=__UpperCamelCase )
snake_case_ : Tuple = False
# add projection layer
snake_case_ : int = nn.Parameter(projection_layer.weight )
snake_case_ : Union[str, Any] = nn.Parameter(projection_layer.bias )
snake_case_ : Dict = create_vocab_dict(__UpperCamelCase )
with open(os.path.join(__UpperCamelCase , """vocab.json""" ) , """w""" ) as fp:
json.dump(__UpperCamelCase , __UpperCamelCase )
snake_case_ : str = SpeechaTextaTokenizer(os.path.join(__UpperCamelCase , """vocab.json""" ) )
tokenizer.save_pretrained(__UpperCamelCase )
snake_case_ : Dict = hf_wavavec.config.to_dict()
snake_case_ : List[str] = tokenizer.pad_token_id
snake_case_ : Union[str, Any] = tokenizer.bos_token_id
snake_case_ : str = tokenizer.eos_token_id
snake_case_ : str = """speech_to_text_2"""
snake_case_ : Any = """wav2vec2"""
snake_case_ : Dict = SpeechEncoderDecoderConfig.from_dict(__UpperCamelCase )
hf_wavavec.save_pretrained(__UpperCamelCase )
feature_extractor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=1_0224, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
__lowerCAmelCase : Any = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
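# --- Added standalone check: the vocabulary layout produced by the dict loader
# above is four specials first, then corpus words in file order from id 4.
_words = ["hello", "world"]  # hypothetical dict-file contents
_vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
_vocab.update(dict(zip(_words, range(4, len(_words) + 4))))
assert _vocab == {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}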
| 710 |
"""simple docstring"""
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__lowerCAmelCase : List[str] = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
__lowerCAmelCase : Optional[Any] = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
__lowerCAmelCase : str = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGLUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
        - for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Any ):
'''simple docstring'''
return float((preds == labels).mean() )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : str="binary" ):
'''simple docstring'''
snake_case_ : Optional[Any] = simple_accuracy(__UpperCamelCase , __UpperCamelCase )
snake_case_ : Dict = float(fa_score(y_true=__UpperCamelCase , y_pred=__UpperCamelCase , average=__UpperCamelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : List[Any] = {}
for id_pred, label in zip(__UpperCamelCase , __UpperCamelCase ):
snake_case_ : Optional[int] = F'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
snake_case_ : Union[str, Any] = id_pred["""prediction"""]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
snake_case_ : str = [(pred, label)]
snake_case_ , snake_case_ : List[str] = [], []
for question, preds_labels in question_map.items():
snake_case_ , snake_case_ : Optional[Any] = zip(*__UpperCamelCase )
snake_case_ : int = fa_score(y_true=__UpperCamelCase , y_pred=__UpperCamelCase , average="""macro""" )
fas.append(__UpperCamelCase )
snake_case_ : Dict = int(sum(pred == label for pred, label in preds_labels ) == len(__UpperCamelCase ) )
ems.append(__UpperCamelCase )
snake_case_ : Optional[int] = float(sum(__UpperCamelCase ) / len(__UpperCamelCase ) )
snake_case_ : Any = sum(__UpperCamelCase ) / len(__UpperCamelCase )
snake_case_ : int = float(fa_score(y_true=__UpperCamelCase , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> List[str]:
'''simple docstring'''
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_lowercase , _lowercase )}
elif self.config_name == "cb":
return acc_and_fa(_lowercase , _lowercase , fa_avg="""macro""" )
elif self.config_name == "record":
snake_case_ : Optional[Any] = [
{
"""qas""": [
{"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
for ref in references
]
}
]
snake_case_ : Dict = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
return evaluate_record(_lowercase , _lowercase )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_lowercase , _lowercase )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_lowercase , _lowercase )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 21 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = ShapEPipeline
_lowerCamelCase = ['''prompt''']
_lowerCamelCase = ['''prompt''']
_lowerCamelCase = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
_lowerCamelCase = False
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return 3_2
@property
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return 3_2
@property
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
return 8
@property
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(_lowercase )
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : int = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 1_6,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 3_2,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
snake_case_ : Any = PriorTransformer(**_lowercase )
return model
@property
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = {
"""param_shapes""": (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 1_2,
"""background""": (
0.1,
0.1,
0.1,
),
}
snake_case_ : str = ShapERenderer(**_lowercase )
return model
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : str = self.dummy_prior
snake_case_ : List[str] = self.dummy_text_encoder
snake_case_ : Optional[Any] = self.dummy_tokenizer
snake_case_ : Optional[Any] = self.dummy_renderer
snake_case_ : Any = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1_0_2_4 , prediction_type="""sample""" , use_karras_sigmas=_lowercase , clip_sample=_lowercase , clip_sample_range=1.0 , )
snake_case_ : Union[str, Any] = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def UpperCAmelCase__ ( self , _lowercase , _lowercase=0 ) -> int:
'''simple docstring'''
if str(_lowercase ).startswith("""mps""" ):
snake_case_ : Optional[Any] = torch.manual_seed(_lowercase )
else:
snake_case_ : Tuple = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
snake_case_ : Optional[Any] = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 3_2,
"""output_type""": """np""",
}
return inputs
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Any = """cpu"""
snake_case_ : List[str] = self.get_dummy_components()
snake_case_ : int = self.pipeline_class(**_lowercase )
snake_case_ : List[str] = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : List[Any] = pipe(**self.get_dummy_inputs(_lowercase ) )
snake_case_ : List[Any] = output.images[0]
snake_case_ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
snake_case_ : Any = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = torch_device == """cpu"""
snake_case_ : List[Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_lowercase , relax_max_difference=_lowercase , )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : Any = self.get_dummy_components()
snake_case_ : str = self.pipeline_class(**_lowercase )
snake_case_ : List[str] = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : Dict = 1
snake_case_ : Optional[int] = 2
snake_case_ : Tuple = self.get_dummy_inputs(_lowercase )
for key in inputs.keys():
if key in self.batch_params:
snake_case_ : Dict = batch_size * [inputs[key]]
snake_case_ : Tuple = pipe(**_lowercase , num_images_per_prompt=_lowercase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
snake_case_ : Any = ShapEPipeline.from_pretrained("""openai/shap-e""" )
snake_case_ : Any = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case_ : Tuple = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case_ : int = pipe(
"""a shark""" , generator=_lowercase , guidance_scale=15.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type="""np""" , ).images[0]
assert images.shape == (2_0, 6_4, 6_4, 3)
assert_mean_pixel_difference(_lowercase , _lowercase )
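# Hedged usage sketch for the slow test above (added; not part of the file):
#
#   pipe = ShapEPipeline.from_pretrained("openai/shap-e").to("cuda")
#   images = pipe(
#       "a shark", guidance_scale=15.0, num_inference_steps=64,
#       frame_size=64, output_type="np",
#   ).images[0]
#   images.shape   # (20, 64, 64, 3): 20 rendered views of the generated asset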
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=9_9 , _lowercase=3_2 , _lowercase=5 , _lowercase=4 , _lowercase=3_7 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=1_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=4 , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = parent
snake_case_ : Dict = batch_size
snake_case_ : Any = seq_length
snake_case_ : Tuple = is_training
snake_case_ : Dict = use_attention_mask
snake_case_ : int = use_token_type_ids
snake_case_ : List[Any] = use_labels
snake_case_ : List[str] = vocab_size
snake_case_ : str = hidden_size
snake_case_ : Optional[Any] = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : Any = intermediate_size
snake_case_ : Optional[Any] = hidden_act
snake_case_ : Tuple = hidden_dropout_prob
snake_case_ : List[Any] = attention_probs_dropout_prob
snake_case_ : int = max_position_embeddings
snake_case_ : Dict = type_vocab_size
snake_case_ : Dict = type_sequence_label_size
snake_case_ : Optional[Any] = initializer_range
snake_case_ : Tuple = num_choices
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Tuple = None
if self.use_attention_mask:
snake_case_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : List[str] = None
if self.use_token_type_ids:
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Tuple = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Any = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = config_and_inputs
snake_case_ : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Tuple = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = config_and_inputs
snake_case_ : Union[str, Any] = True
snake_case_ : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
snake_case_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = True
_lowerCamelCase = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Any = FlaxRobertaPreLayerNormModelTester(self )
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case_ : Tuple = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : List[str] = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
snake_case_ : str = model(_lowercase )[0]
snake_case_ : int = [1, 1_1, 5_0_2_6_5]
self.assertEqual(list(output.shape ) , _lowercase )
# compare the actual values for a slice.
snake_case_ : Tuple = np.array(
[[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Any = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowercase )
snake_case_ : Dict = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
snake_case_ : Any = model(_lowercase )[0]
# compare the actual values for a slice.
snake_case_ : Optional[Any] = np.array(
[[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
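# Usage sketch condensed from the integration tests above (added):
#
#   model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained(
#       "andreasmadsen/efficient_mlm_m0.40", from_pt=True
#   )
#   input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
#   logits = model(input_ids)[0]   # shape (1, 11, 50265)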
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve ( __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = 1.5
snake_case_ : Any = int(factor * num_class_images )
snake_case_ : Any = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=__UpperCamelCase , aesthetic_weight=0.1 )
os.makedirs(F'{class_data_dir}/images' , exist_ok=__UpperCamelCase )
if len(list(Path(F'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
return
while True:
snake_case_ : Dict = client.query(text=__UpperCamelCase )
if len(__UpperCamelCase ) >= factor * num_class_images or num_images > 1E4:
break
else:
snake_case_ : Union[str, Any] = int(factor * num_images )
snake_case_ : str = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=__UpperCamelCase , aesthetic_weight=0.1 , )
snake_case_ : Optional[Any] = 0
snake_case_ : int = 0
snake_case_ : str = tqdm(desc="""downloading real regularization images""" , total=__UpperCamelCase )
with open(F'{class_data_dir}/caption.txt' , """w""" ) as fa, open(F'{class_data_dir}/urls.txt' , """w""" ) as fa, open(
F'{class_data_dir}/images.txt' , """w""" ) as fa:
while total < num_class_images:
snake_case_ : int = class_images[count]
count += 1
try:
snake_case_ : List[Any] = requests.get(images["""url"""] )
if img.status_code == 2_0_0:
snake_case_ : List[str] = Image.open(BytesIO(img.content ) )
with open(F'{class_data_dir}/images/{total}.jpg' , """wb""" ) as f:
f.write(img.content )
fa.write(images["""caption"""] + """\n""" )
fa.write(images["""url"""] + """\n""" )
fa.write(F'{class_data_dir}/images/{total}.jpg' + """\n""" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def parse_args ( ):
'''simple docstring'''
snake_case_ : Optional[Any] = argparse.ArgumentParser("""""" , add_help=__UpperCamelCase )
parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=__UpperCamelCase , type=__UpperCamelCase )
parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=__UpperCamelCase , type=__UpperCamelCase )
parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=2_0_0 , type=__UpperCamelCase )
return parser.parse_args()
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
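# Example invocation (added; the script file name is an assumption):
#   python retrieve.py --class_prompt "photo of a dog" \
#       --class_data_dir ./real_reg/dog --num_class_images 200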
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
__lowerCAmelCase : Optional[Any] = parser.parse_args()
__lowerCAmelCase : Dict = '''cpu'''
__lowerCAmelCase : Optional[Any] = '''a lovely <dicoo> in red dress and hat, in the snowy and bright night, with many brightly lit buildings'''
__lowerCAmelCase : Tuple = '''path-to-your-trained-model'''
__lowerCAmelCase : List[Any] = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
__lowerCAmelCase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
__lowerCAmelCase : List[Any] = pipe.to(device)
# to channels last
__lowerCAmelCase : Optional[Any] = pipe.unet.to(memory_format=torch.channels_last)
__lowerCAmelCase : List[str] = pipe.vae.to(memory_format=torch.channels_last)
__lowerCAmelCase : Optional[Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
__lowerCAmelCase : Dict = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
__lowerCAmelCase : Tuple = torch.randn(2, 4, 64, 64)
__lowerCAmelCase : Any = torch.rand(1) * 999
__lowerCAmelCase : List[str] = torch.randn(2, 77, 768)
__lowerCAmelCase : Optional[int] = (sample, timestep, encoder_hidden_status)
try:
__lowerCAmelCase : List[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
__lowerCAmelCase : Optional[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
__lowerCAmelCase : Any = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
__lowerCAmelCase : int = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
__lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
__lowerCAmelCase : List[str] = 666
__lowerCAmelCase : Optional[int] = torch.Generator(device).manual_seed(seed)
__lowerCAmelCase : List[Any] = {'''generator''': generator}
if args.steps is not None:
__lowerCAmelCase : Any = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
__lowerCAmelCase : str = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
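# Example invocation (added; the script file name is an assumption):
#   python inference_bf16.py --dpm --steps 20
# `--dpm` swaps in DPMSolverMultistepScheduler; `--steps`, when given, is
# forwarded to the pipeline through `generate_kwargs`.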
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> str:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> str:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Any:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> str:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Any:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Any:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> str:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Any:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> int:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _lowerCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''sentencepiece''']
def __init__( self , *_lowercase , **_lowercase ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
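# The classes above are auto-generated placeholders: importing them always
# succeeds, but instantiating any of them without the `sentencepiece` backend
# raises an ImportError via `requires_backends`. A minimal sketch of such a
# guard (illustrative only; the real helper lives in
# `transformers.utils.import_utils`, and `backend_is_available` is hypothetical):
#
#   def requires_backends(obj, backends):
#       name = getattr(obj, "__name__", obj.__class__.__name__)
#       missing = [b for b in backends if not backend_is_available(b)]
#       if missing:
#           raise ImportError(f"{name} requires the {missing} backend(s).")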
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = RoFormerTokenizer
_lowerCamelCase = RoFormerTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = True
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().setUp()
def UpperCAmelCase__ ( self , **_lowercase ) -> str:
'''simple docstring'''
return self.tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **_lowercase )
def UpperCAmelCase__ ( self , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **_lowercase )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : Tuple = """永和服装饰品有限公司,今天天气非常好"""
snake_case_ : int = """永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"""
return input_text, output_text
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : List[str] = self.get_tokenizer()
snake_case_ , snake_case_ : Optional[Any] = self.get_chinese_input_output_texts()
snake_case_ : List[str] = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , output_text.split() )
snake_case_ : str = tokens + [tokenizer.unk_token]
snake_case_ : Tuple = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : int = self.get_rust_tokenizer()
snake_case_ , snake_case_ : List[Any] = self.get_chinese_input_output_texts()
snake_case_ : Union[str, Any] = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , output_text.split() )
snake_case_ : Optional[int] = tokens + [tokenizer.unk_token]
snake_case_ : Union[str, Any] = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
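# Usage sketch distilled from the tests above (added; requires `rjieba`):
#
#   tok = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
#   tok.tokenize("永和服装饰品有限公司,今天天气非常好")
#   # -> ['永和', '服装', '饰品', '有限公司', ',', '今', '天', '天', '气', '非常', '好']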
"""simple docstring"""
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowercase ) -> None:
'''simple docstring'''
snake_case_ : Any = size
snake_case_ : Tuple = [0] * size
snake_case_ : Dict = [0] * size
@staticmethod
def UpperCAmelCase__ ( _lowercase ) -> int:
'''simple docstring'''
return index | (index + 1)
@staticmethod
def UpperCAmelCase__ ( _lowercase ) -> int:
'''simple docstring'''
return (index & (index + 1)) - 1
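# Worked example of the two bit tricks above (added for clarity):
#   get_next(5) = 5 | 6 = 0b101 | 0b110 = 0b111 = 7
#   get_prev(5) = (5 & 6) - 1 = 0b100 - 1 = 3
# Each tree slot i aggregates the range [get_prev(i) + 1, i], and get_next(i)
# is the next slot whose range also covers index i.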
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> None:
'''simple docstring'''
snake_case_ : List[str] = value
while index < self.size:
snake_case_ : Any = self.get_prev(_lowercase ) + 1
if current_left_border == index:
snake_case_ : Tuple = value
else:
snake_case_ : Dict = max(_lowercase , _lowercase , _lowercase )
snake_case_ : str = self.get_next(_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> int:
'''simple docstring'''
right -= 1 # because `right` is exclusive
snake_case_ : Tuple = 0
while left <= right:
snake_case_ : str = self.get_prev(_lowercase )
if left <= current_left:
snake_case_ : int = max(_lowercase , self.tree[right] )
snake_case_ : Dict = current_left
else:
snake_case_ : List[Any] = max(_lowercase , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
def miller_rabin ( __UpperCamelCase : int , __UpperCamelCase : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 1_0 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
snake_case_ : List[Any] = [
2_0_4_7,
1_3_7_3_6_5_3,
2_5_3_2_6_0_0_1,
3_2_1_5_0_3_1_7_5_1,
2_1_5_2_3_0_2_8_9_8_7_4_7,
3_4_7_4_7_4_9_6_6_0_3_8_3,
3_4_1_5_5_0_0_7_1_7_2_8_3_2_1,
1,
3_8_2_5_1_2_3_0_5_6_5_4_6_4_1_3_0_5_1,
1,
1,
3_1_8_6_6_5_8_5_7_8_3_4_0_3_1_1_5_1_1_6_7_4_6_1,
3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1,
]
snake_case_ : Dict = [2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9, 3_1, 3_7, 4_1]
for idx, _p in enumerate(__UpperCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
snake_case_ : Optional[int] = primes[:idx]
break
snake_case_ , snake_case_ : Tuple = n - 1, 0
# break up n - 1 into a power of 2 (s) and
# a remaining odd component (d);
# essentially, solve d * 2 ** s == n - 1
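# worked example (added): for n = 221 we get n - 1 = 220 = 55 * 2**2,
# so d = 55 and s = 2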
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
snake_case_ : List[str] = False
for r in range(__UpperCamelCase ):
snake_case_ : int = pow(__UpperCamelCase , d * 2**r , __UpperCamelCase )
# see the article's analysis for an explanation of m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
snake_case_ : Optional[Any] = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and n must be composite
return False
return True
def test_miller_rabin ( ):
'''simple docstring'''
assert not miller_rabin(5_6_1 )
assert miller_rabin(5_6_3 )
# 2047
assert not miller_rabin(8_3_8_2_0_1 )
assert miller_rabin(8_3_8_2_0_7 )
# 1_373_653
assert not miller_rabin(1_7_3_1_6_0_0_1 )
assert miller_rabin(1_7_3_1_6_0_1_7 )
# 25_326_001
assert not miller_rabin(3_0_7_8_3_8_6_6_4_1 )
assert miller_rabin(3_0_7_8_3_8_6_6_5_3 )
# 3_215_031_751
assert not miller_rabin(1_7_1_3_0_4_5_5_7_4_8_0_1 )
assert miller_rabin(1_7_1_3_0_4_5_5_7_4_8_1_9 )
# 2_152_302_898_747
assert not miller_rabin(2_7_7_9_7_9_9_7_2_8_3_0_7 )
assert miller_rabin(2_7_7_9_7_9_9_7_2_8_3_2_7 )
# 3_474_749_660_383
assert not miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_4_4_1 )
assert miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_5_2_7 )
# 341_550_071_728_321
assert not miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_5_1 )
assert miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_9_1 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_8_6_7 )
assert miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_9_5_1 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_3_3 )
assert miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_5_9 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
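# Added note: the prime bases pair with the bounds list above -- e.g. base (2,)
# is deterministic below 2_047, (2, 3) below 1_373_653, and (2, 3, 5) below
# 25_326_001 -- which is why the test is exact rather than probabilistic there.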
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ['''input_features''', '''is_longer''']
def __init__( self , _lowercase=6_4 , _lowercase=4_8_0_0_0 , _lowercase=4_8_0 , _lowercase=1_0 , _lowercase=1_0_2_4 , _lowercase=0.0 , _lowercase=False , _lowercase = 0 , _lowercase = 1_4_0_0_0 , _lowercase = None , _lowercase = "fusion" , _lowercase = "repeatpad" , **_lowercase , ) -> Dict:
'''simple docstring'''
super().__init__(
feature_size=_lowercase , sampling_rate=_lowercase , padding_value=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
snake_case_ : Optional[Any] = top_db
snake_case_ : int = truncation
snake_case_ : Tuple = padding
snake_case_ : Any = fft_window_size
snake_case_ : Optional[int] = (fft_window_size >> 1) + 1
snake_case_ : Dict = hop_length
snake_case_ : List[str] = max_length_s
snake_case_ : str = max_length_s * sampling_rate
snake_case_ : str = sampling_rate
snake_case_ : Dict = frequency_min
snake_case_ : Optional[Any] = frequency_max
snake_case_ : Any = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_lowercase , min_frequency=_lowercase , max_frequency=_lowercase , sampling_rate=_lowercase , norm=_lowercase , mel_scale="""htk""" , )
snake_case_ : Union[str, Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_lowercase , min_frequency=_lowercase , max_frequency=_lowercase , sampling_rate=_lowercase , norm="""slaney""" , mel_scale="""slaney""" , )
def UpperCAmelCase__ ( self ) -> Dict[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = copy.deepcopy(self.__dict__ )
snake_case_ : Union[str, Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None ) -> np.ndarray:
'''simple docstring'''
snake_case_ : str = spectrogram(
_lowercase , window_function(self.fft_window_size , """hann""" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_lowercase , log_mel="""dB""" , )
return log_mel_spectrogram.T
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
snake_case_ : Optional[int] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
snake_case_ : Any = [0]
# randomly choose index for each part
snake_case_ : List[str] = np.random.choice(ranges[0] )
snake_case_ : List[str] = np.random.choice(ranges[1] )
snake_case_ : Union[str, Any] = np.random.choice(ranges[2] )
snake_case_ : Optional[Any] = mel[idx_front : idx_front + chunk_frames, :]
snake_case_ : List[str] = mel[idx_middle : idx_middle + chunk_frames, :]
snake_case_ : Optional[Any] = mel[idx_back : idx_back + chunk_frames, :]
snake_case_ : Optional[Any] = torch.tensor(mel[None, None, :] )
snake_case_ : Tuple = torch.nn.functional.interpolate(
_lowercase , size=[chunk_frames, 6_4] , mode="""bilinear""" , align_corners=_lowercase )
snake_case_ : Optional[Any] = mel_shrink[0][0].numpy()
snake_case_ : Union[str, Any] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
snake_case_ : Optional[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
snake_case_ : List[str] = len(_lowercase ) - max_length
snake_case_ : Union[str, Any] = np.random.randint(0 , overflow + 1 )
snake_case_ : Tuple = waveform[idx : idx + max_length]
snake_case_ : Optional[Any] = self._np_extract_fbank_features(_lowercase , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
snake_case_ : Optional[Any] = self._np_extract_fbank_features(_lowercase , self.mel_filters )
snake_case_ : Optional[Any] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
snake_case_ : Optional[Any] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
snake_case_ : List[str] = np.stack([mel, mel, mel, mel] , axis=0 )
snake_case_ : int = False
else:
snake_case_ : List[str] = self._random_mel_fusion(_lowercase , _lowercase , _lowercase )
snake_case_ : List[Any] = True
else:
raise NotImplementedError(f'data_truncating {truncation} not implemented' )
else:
snake_case_ : Optional[Any] = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
snake_case_ : str = int(max_length / len(_lowercase ) )
snake_case_ : Union[str, Any] = np.stack(np.tile(_lowercase , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
snake_case_ : List[str] = int(max_length / len(_lowercase ) )
snake_case_ : Tuple = np.stack(np.tile(_lowercase , _lowercase ) )
snake_case_ : int = np.pad(_lowercase , (0, max_length - waveform.shape[0]) , mode="""constant""" , constant_values=0 )
if truncation == "fusion":
snake_case_ : Dict = self._np_extract_fbank_features(_lowercase , self.mel_filters )
snake_case_ : Union[str, Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
snake_case_ : int = self._np_extract_fbank_features(_lowercase , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self , _lowercase , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , **_lowercase , ) -> BatchFeature:
'''simple docstring'''
snake_case_ : Union[str, Any] = truncation if truncation is not None else self.truncation
snake_case_ : Optional[Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
snake_case_ : Optional[Any] = isinstance(_lowercase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
snake_case_ : Optional[int] = is_batched_numpy or (
isinstance(_lowercase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
snake_case_ : List[str] = [np.asarray(_lowercase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_lowercase , np.ndarray ):
snake_case_ : str = np.asarray(_lowercase , dtype=np.floataa )
elif isinstance(_lowercase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
snake_case_ : Dict = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
snake_case_ : Optional[int] = [np.asarray(_lowercase )]
# convert to mel spectrogram, truncate and pad if needed.
snake_case_ : Dict = [
self._get_input_mel(_lowercase , max_length if max_length else self.nb_max_samples , _lowercase , _lowercase )
for waveform in raw_speech
]
snake_case_ : int = []
snake_case_ : str = []
for mel, longer in padded_inputs:
input_mel.append(_lowercase )
is_longer.append(_lowercase )
if truncation == "fusion" and sum(_lowercase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
snake_case_ : Dict = np.random.randint(0 , len(_lowercase ) )
snake_case_ : List[str] = True
if isinstance(input_mel[0] , _lowercase ):
snake_case_ : int = [np.asarray(_lowercase , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
snake_case_ : Dict = [[longer] for longer in is_longer]
snake_case_ : str = {"""input_features""": input_mel, """is_longer""": is_longer}
snake_case_ : Tuple = BatchFeature(_lowercase )
if return_tensors is not None:
snake_case_ : Any = input_features.convert_to_tensors(_lowercase )
return input_features
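# Hedged usage sketch (added; the public class name and exact shapes are
# inferred from the defaults above, not stated in this file):
#
#   import numpy as np
#   fe = ClapFeatureExtractor()                   # the extractor defined here
#   audio = np.zeros(48_000, dtype=np.float32)    # 1 s of silence at 48 kHz
#   out = fe(audio, sampling_rate=48_000, return_tensors="np")
#   out["input_features"].shape   # ~(1, 4, 1001, 64) in the default "fusion" mode
#   # note: when no clip exceeds max_length_s, the fusion branch above flips
#   # one `is_longer` entry to True at random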
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator ( __UpperCamelCase : int ):
'''simple docstring'''
def is_in_circle(__UpperCamelCase : float , __UpperCamelCase : float ) -> bool:
snake_case_ : Dict = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
snake_case_ : Tuple = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(__UpperCamelCase ) )
# The ratio of the area for circle to square is pi/4.
snake_case_ : Union[str, Any] = proportion * 4
print(F'The estimated value of pi is {pi_estimate}' )
print(F'The known value of pi is {pi}' )
print(F'The total error is {abs(pi - pi_estimate )}' )
def area_under_curve_estimator ( __UpperCamelCase : int , __UpperCamelCase : Callable[[float], float] , __UpperCamelCase : float = 0.0 , __UpperCamelCase : float = 1.0 , ):
'''simple docstring'''
return mean(
function_to_integrate(uniform(__UpperCamelCase , __UpperCamelCase ) ) for _ in range(__UpperCamelCase ) ) * (max_value - min_value)
def area_under_line_estimator_check ( __UpperCamelCase : int , __UpperCamelCase : float = 0.0 , __UpperCamelCase : float = 1.0 ):
'''simple docstring'''
def identity_function(__UpperCamelCase : float ) -> float:
return x
snake_case_ : int = area_under_curve_estimator(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ : str = (max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {expected_value}' )
print(F'Total error is {abs(estimated_value - expected_value )}' )
print("""******************""" )
def pi_estimator_using_area_under_curve ( __UpperCamelCase : int ):
'''simple docstring'''
def function_to_integrate(__UpperCamelCase : float ) -> float:
return sqrt(4.0 - x * x )
snake_case_ : List[Any] = area_under_curve_estimator(
__UpperCamelCase , __UpperCamelCase , 0.0 , 2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {pi}' )
print(F'Total error is {abs(estimated_value - pi )}' )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
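# Added note: all three estimators above converge at the usual Monte Carlo
# rate of O(1 / sqrt(iterations)); e.g. moving from 10**4 to 10**6 samples
# shrinks the typical error by roughly a factor of 10.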
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowercase = "" , _lowercase = False ) -> None:
'''simple docstring'''
snake_case_ : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
snake_case_ : int = is_leaf
snake_case_ : Union[str, Any] = prefix
def UpperCAmelCase__ ( self , _lowercase ) -> tuple[str, str, str]:
'''simple docstring'''
snake_case_ : Optional[int] = 0
for q, w in zip(self.prefix , _lowercase ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
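# Worked example (added): with self.prefix = "banana" and word = "bandana",
# the characters diverge at x = 3, so match returns ("ban", "ana", "dana").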
def UpperCAmelCase__ ( self , _lowercase ) -> None:
'''simple docstring'''
for word in words:
self.insert(_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> None:
'''simple docstring'''
if self.prefix == word:
snake_case_ : Dict = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
snake_case_ : Optional[int] = RadixNode(prefix=_lowercase , is_leaf=_lowercase )
else:
snake_case_ : List[Any] = self.nodes[word[0]]
snake_case_ , snake_case_ , snake_case_ : Any = incoming_node.match(
_lowercase )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(_lowercase )
# Case 4: The word is greater than or equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
snake_case_ : Any = remaining_prefix
snake_case_ : int = self.nodes[matching_string[0]]
snake_case_ : int = RadixNode(_lowercase , _lowercase )
snake_case_ : List[Any] = aux_node
if remaining_word == "":
snake_case_ : Optional[Any] = True
else:
self.nodes[matching_string[0]].insert(_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> bool:
'''simple docstring'''
snake_case_ : List[str] = self.nodes.get(word[0] , _lowercase )
if not incoming_node:
return False
else:
snake_case_ , snake_case_ , snake_case_ : Optional[int] = incoming_node.match(
_lowercase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> bool:
'''simple docstring'''
snake_case_ : List[str] = self.nodes.get(word[0] , _lowercase )
if not incoming_node:
return False
else:
snake_case_ , snake_case_ , snake_case_ : Optional[int] = incoming_node.match(
_lowercase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(_lowercase )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
snake_case_ : List[str] = list(self.nodes.values() )[0]
snake_case_ : Dict = merging_node.is_leaf
self.prefix += merging_node.prefix
snake_case_ : List[Any] = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
snake_case_ : int = False
# If there is 1 edge, we merge it with its child
else:
snake_case_ : Tuple = list(incoming_node.nodes.values() )[0]
snake_case_ : Optional[int] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
snake_case_ : Union[str, Any] = merging_node.nodes
return True
def UpperCAmelCase__ ( self , _lowercase = 0 ) -> None:
'''simple docstring'''
if self.prefix != "":
print("""-""" * height , self.prefix , """ (leaf)""" if self.is_leaf else """""" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def test_trie ( ):
'''simple docstring'''
snake_case_ : int = """banana bananas bandana band apple all beast""".split()
snake_case_ : List[str] = RadixNode()
root.insert_many(__UpperCamelCase )
assert all(root.find(__UpperCamelCase ) for word in words )
assert not root.find("""bandanas""" )
assert not root.find("""apps""" )
root.delete("""all""" )
assert not root.find("""all""" )
root.delete("""banana""" )
assert not root.find("""banana""" )
assert root.find("""bananas""" )
return True
def pytests ( ):
'''simple docstring'''
assert test_trie()
def main ( ):
'''simple docstring'''
snake_case_ : Union[str, Any] = RadixNode()
snake_case_ : Any = """banana bananas bandanas bandana band apple all beast""".split()
root.insert_many(__UpperCamelCase )
print("""Words:""" , __UpperCamelCase )
print("""Tree:""" )
root.print_tree()
if __name__ == "__main__":
main()
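# Added note: find/insert/delete each descend at most len(word) edges, since
# every hop consumes at least one matched character, so the operations run in
# roughly O(word length) node visits.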
"""simple docstring"""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
def cosine_distance ( __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = nn.functional.normalize(__UpperCamelCase )
snake_case_ : Tuple = nn.functional.normalize(__UpperCamelCase )
return torch.mm(__UpperCamelCase , normalized_text_embeds.t() )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = CLIPConfig
_lowerCamelCase = ['''CLIPEncoderLayer''']
def __init__( self , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Tuple = CLIPVisionModel(config.vision_config )
snake_case_ : int = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_lowercase )
snake_case_ : Optional[Any] = nn.Parameter(torch.ones(1_7 , config.projection_dim ) , requires_grad=_lowercase )
snake_case_ : Dict = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=_lowercase )
snake_case_ : Any = nn.Parameter(torch.ones(1_7 ) , requires_grad=_lowercase )
snake_case_ : List[str] = nn.Parameter(torch.ones(3 ) , requires_grad=_lowercase )
@torch.no_grad()
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
snake_case_ : int = self.vision_model(_lowercase )[1] # pooled_output
snake_case_ : str = self.visual_projection(_lowercase )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
snake_case_ : Dict = cosine_distance(_lowercase , self.special_care_embeds ).cpu().float().numpy()
snake_case_ : List[str] = cosine_distance(_lowercase , self.concept_embeds ).cpu().float().numpy()
snake_case_ : Any = []
snake_case_ : Any = image_embeds.shape[0]
for i in range(_lowercase ):
snake_case_ : List[Any] = {"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []}
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
snake_case_ : int = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
snake_case_ : List[str] = special_cos_dist[i][concept_idx]
snake_case_ : Union[str, Any] = self.special_care_embeds_weights[concept_idx].item()
snake_case_ : Tuple = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} )
snake_case_ : Dict = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
snake_case_ : int = cos_dist[i][concept_idx]
snake_case_ : List[Any] = self.concept_embeds_weights[concept_idx].item()
snake_case_ : List[str] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(_lowercase )
result.append(_lowercase )
snake_case_ : Union[str, Any] = [len(res["""bad_concepts"""] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = self.vision_model(_lowercase )[1] # pooled_output
snake_case_ : List[str] = self.visual_projection(_lowercase )
snake_case_ : str = cosine_distance(_lowercase , self.special_care_embeds )
snake_case_ : Optional[int] = cosine_distance(_lowercase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
snake_case_ : Tuple = 0.0
snake_case_ : List[Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
snake_case_ : str = torch.any(special_scores > 0 , dim=1 )
snake_case_ : List[str] = special_care * 0.01
snake_case_ : Optional[int] = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
snake_case_ : Optional[Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
snake_case_ : str = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
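# Minimal sketch of the helper above (added; despite its name it returns
# cosine *similarity*, since both inputs are L2-normalized before the matmul):
#
#   a = torch.randn(2, 768)     # e.g. projected image embeddings
#   b = torch.randn(17, 768)    # e.g. the 17 special-care concept embeddings
#   cosine_distance(a, b).shape   # torch.Size([2, 17]) -- one score per pair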
"""simple docstring"""
from functools import reduce
__lowerCAmelCase : Optional[Any] = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution ( __UpperCamelCase : str = N ):
'''simple docstring'''
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda __UpperCamelCase , __UpperCamelCase : str(int(__UpperCamelCase ) * int(__UpperCamelCase ) ) , n[i : i + 1_3] ) )
for i in range(len(__UpperCamelCase ) - 1_2 ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
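# Added note: with the standard 1000-digit Project Euler series above and a
# window of 13 adjacent digits, the product printed here should be
# 23514624000.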
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : List[str] = []
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
F'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
F'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
F'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
F'stage{idx}.patch_embed.norm.bias',
) )
return embed
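# Example of one generated pair (added): for idx = 0 the first tuple is
#   ('cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight',
#    'stage0.patch_embed.proj.weight')
# i.e. (Hugging Face parameter name, original checkpoint name).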
def attention ( __UpperCamelCase : str , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : str = []
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
F'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
F'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', F'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', F'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', F'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', F'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
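# Example of one rendered pair for idx=0, cnt=0 (illustrative; it simply follows
# the f-strings above): the first element is the Hugging Face parameter name,
# the second the matching name in the original CvT checkpoint:
#   ("cvt.encoder.stages.0.layers.0.attention.attention.projection_query.weight",
#    "stage0.blocks.0.attn.proj_q.weight")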
def __lowerCAmelCase ( __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : int = []
    token.append(("""cvt.encoder.stages.2.cls_token""", """stage2.cls_token""") )  # only stage 2 carries a cls token in CvT
return token
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Union[str, Any] = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = """imagenet-1k-id2label.json"""
snake_case_ : Optional[Any] = 1_0_0_0
snake_case_ : Any = """huggingface/label-files"""
snake_case_ : Tuple = num_labels
snake_case_ : Dict = json.load(open(cached_download(hf_hub_url(__UpperCamelCase , __UpperCamelCase , repo_type="""dataset""" ) ) , """r""" ) )
snake_case_ : str = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
snake_case_ : List[str] = idalabel
snake_case_ : Any = {v: k for k, v in idalabel.items()}
snake_case_ : Dict = CvtConfig(num_labels=__UpperCamelCase , idalabel=__UpperCamelCase , labelaid=__UpperCamelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "13":
snake_case_ : Any = [1, 2, 1_0]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "21":
snake_case_ : Any = [1, 4, 1_6]
    # For wide CvT (similar to wide-resnet), depth size 24 (24 = 2 + 2 + 20)
else:
snake_case_ : Optional[int] = [2, 2, 2_0]
snake_case_ : str = [3, 1_2, 1_6]
snake_case_ : Any = [1_9_2, 7_6_8, 1_0_2_4]
snake_case_ : Union[str, Any] = CvtForImageClassification(__UpperCamelCase )
snake_case_ : str = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
snake_case_ : List[Any] = image_size
snake_case_ : str = torch.load(__UpperCamelCase , map_location=torch.device("""cpu""" ) )
snake_case_ : Any = OrderedDict()
snake_case_ : Tuple = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
snake_case_ : Optional[Any] = list_of_state_dict + cls_token(__UpperCamelCase )
snake_case_ : str = list_of_state_dict + embeddings(__UpperCamelCase )
for cnt in range(config.depth[idx] ):
snake_case_ : List[str] = list_of_state_dict + attention(__UpperCamelCase , __UpperCamelCase )
snake_case_ : str = list_of_state_dict + final()
for gg in list_of_state_dict:
print(__UpperCamelCase )
for i in range(len(__UpperCamelCase ) ):
snake_case_ : Union[str, Any] = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
image_processor.save_pretrained(__UpperCamelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
        help='''Path to the original CvT checkpoint file.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__lowerCAmelCase : Dict = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
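# Example invocation (hedged sketch: the script name and local paths are
# illustrative; the weights come from the zoo link above):
#   python convert_cvt_original_pytorch_checkpoint_to_pytorch.py \
#       --cvt_model cvt-w24 \
#       --image_size 384 \
#       --cvt_file_name ./CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24-384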
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Any ):
'''simple docstring'''
assert isinstance(__UpperCamelCase , __UpperCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : List[str] = tmp_path / """cache"""
snake_case_ : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case_ : str = ParquetDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase ).read()
_check_parquet_dataset(__UpperCamelCase , __UpperCamelCase )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path / """cache"""
snake_case_ : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
snake_case_ : Optional[Any] = features.copy() if features else default_expected_features
snake_case_ : Union[str, Any] = (
Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case_ : Optional[Any] = ParquetDatasetReader(__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_parquet_dataset(__UpperCamelCase , __UpperCamelCase )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : Dict = tmp_path / """cache"""
snake_case_ : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
snake_case_ : str = ParquetDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase , split=__UpperCamelCase ).read()
_check_parquet_dataset(__UpperCamelCase , __UpperCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple ):
'''simple docstring'''
if issubclass(__UpperCamelCase , __UpperCamelCase ):
snake_case_ : Union[str, Any] = parquet_path
elif issubclass(__UpperCamelCase , __UpperCamelCase ):
snake_case_ : Any = [parquet_path]
snake_case_ : int = tmp_path / """cache"""
snake_case_ : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
snake_case_ : Union[str, Any] = ParquetDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_parquet_dataset(__UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int]=("train",) ):
'''simple docstring'''
assert isinstance(__UpperCamelCase , __UpperCamelCase )
for split in splits:
snake_case_ : Optional[int] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path / """cache"""
snake_case_ : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case_ : Optional[int] = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase ).read()
_check_parquet_datasetdict(__UpperCamelCase , __UpperCamelCase )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : str = tmp_path / """cache"""
snake_case_ : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
snake_case_ : Optional[int] = features.copy() if features else default_expected_features
snake_case_ : Optional[int] = (
Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case_ : Optional[Any] = ParquetDatasetReader({"""train""": parquet_path} , features=__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_parquet_datasetdict(__UpperCamelCase , __UpperCamelCase )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any ):
'''simple docstring'''
if split:
snake_case_ : Optional[int] = {split: parquet_path}
else:
snake_case_ : Tuple = """train"""
snake_case_ : List[Any] = {"""train""": parquet_path, """test""": parquet_path}
snake_case_ : Any = tmp_path / """cache"""
snake_case_ : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
snake_case_ : str = ParquetDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_parquet_datasetdict(__UpperCamelCase , __UpperCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : int = ParquetDatasetWriter(__UpperCamelCase , tmp_path / """foo.parquet""" )
assert writer.write() > 0
snake_case_ : str = pq.ParquetFile(tmp_path / """foo.parquet""" )
snake_case_ : Optional[int] = pf.read()
assert dataset.data.table == output_table
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : Tuple = str(shared_datadir / """test_image_rgb.jpg""" )
snake_case_ : Union[str, Any] = {"""image""": [image_path]}
snake_case_ : Optional[int] = Features({"""image""": Image()} )
snake_case_ : str = Dataset.from_dict(__UpperCamelCase , features=__UpperCamelCase )
snake_case_ : Optional[int] = ParquetDatasetWriter(__UpperCamelCase , tmp_path / """foo.parquet""" )
assert writer.write() > 0
snake_case_ : Optional[int] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
assert dataset.features == reloaded_dataset.features
snake_case_ : int = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=__UpperCamelCase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
assert get_writer_batch_size(__UpperCamelCase ) == expected
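if __name__ == "__main__":
    # Hedged round-trip sketch of the reader/writer pair exercised above; not
    # part of the original test module, and the file name is illustrative
    # (written to the current working directory).
    demo = Dataset.from_dict({"col_1": ["0", "1"], "col_2": [0, 1], "col_3": [0.0, 1.0]})
    ParquetDatasetWriter(demo, "demo.parquet").write()  # returns the number of bytes written
    print(ParquetDatasetReader("demo.parquet").read())  # Dataset with the same 2 rows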
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = MgpstrTokenizer
_lowerCamelCase = False
_lowerCamelCase = {}
_lowerCamelCase = False
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
super().setUp()
# fmt: off
snake_case_ : Optional[Any] = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
snake_case_ : str = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowercase ) + """\n""" )
def UpperCAmelCase__ ( self , **_lowercase ) -> Union[str, Any]:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = """tester"""
snake_case_ : Tuple = """tester"""
return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Dict = self.get_tokenizers(do_lower_case=_lowercase )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
snake_case_ : Optional[Any] = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
snake_case_ : Union[str, Any] = tokenizer.encode([special_token] , add_special_tokens=_lowercase )
self.assertEqual(len(_lowercase ) , 1 )
snake_case_ : List[Any] = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
self.assertTrue(special_token not in decoded )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
snake_case_ , snake_case_ : Union[str, Any] = self.get_input_output_texts(_lowercase )
snake_case_ : List[Any] = tokenizer.tokenize(_lowercase )
snake_case_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(_lowercase )
snake_case_ : Union[str, Any] = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
snake_case_ : List[str] = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertNotEqual(len(_lowercase ) , 0 )
snake_case_ : str = tokenizer.decode(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(text_a.replace(""" """ , """""" ) , _lowercase )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
pass
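# Hedged usage sketch (the checkpoint id is assumed from the MGP-STR release
# and is not part of these tests): the tokenizer is character-level, so
# encoding and decoding round-trip per character.
#   from transformers import MgpstrTokenizer
#   tok = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
#   ids = tok("tester")["input_ids"]   # one id per character
#   assert tok.decode(ids) == "tester"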
"""simple docstring"""
import os
import jsonlines
import numpy as np
from tqdm import tqdm
__lowerCAmelCase : Optional[int] = 2048
__lowerCAmelCase : int = 4096
__lowerCAmelCase : Union[str, Any] = 42
__lowerCAmelCase : List[Any] = os.environ.pop('''PROCESS_TRAIN''', '''false''')
__lowerCAmelCase : Union[str, Any] = {'''null''': 0, '''short''': 1, '''long''': 2, '''yes''': 3, '''no''': 4}
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
def choose_first(__UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any=False ):
assert isinstance(__UpperCamelCase , __UpperCamelCase )
if len(__UpperCamelCase ) == 1:
snake_case_ : Optional[int] = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
snake_case_ : Optional[int] = {k: [a[k]] for k in a}
if len(a["""start_token"""] ) > 0:
break
return a
snake_case_ : str = {"""id""": example["""id"""]}
snake_case_ : Tuple = example["""annotations"""]
snake_case_ : Dict = annotation["""yes_no_answer"""]
if 0 in yes_no_answer or 1 in yes_no_answer:
snake_case_ : Union[str, Any] = ["""yes"""] if 1 in yes_no_answer else ["""no"""]
snake_case_ : List[Any] = []
snake_case_ : Tuple = []
snake_case_ : Optional[Any] = ["""<cls>"""]
else:
snake_case_ : Dict = ["""short"""]
snake_case_ : Any = choose_first(annotation["""short_answers"""] )
if len(out["""start_token"""] ) == 0:
# answer will be long if short is not available
snake_case_ : Optional[int] = ["""long"""]
snake_case_ : List[str] = choose_first(annotation["""long_answer"""] , is_long_answer=__UpperCamelCase )
snake_case_ : Tuple = []
answer.update(__UpperCamelCase )
# disregard some samples
if len(answer["""start_token"""] ) > 1 or answer["start_token"] == answer["end_token"]:
snake_case_ : Optional[Any] = True
else:
snake_case_ : List[str] = False
snake_case_ : List[Any] = ["""start_token""", """end_token""", """start_byte""", """end_byte""", """text"""]
if not all(isinstance(answer[k] , __UpperCamelCase ) for k in cols ):
raise ValueError("""Issue in ID""" , example["""id"""] )
return answer
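# Worked example of the selection above (shapes only, values invented): an
# annotation with yes_no_answer == [1] yields category ["yes"], empty token
# spans and the placeholder text ["<cls>"]; otherwise the first usable short
# answer is kept, falling back to the first long answer when no short span
# exists, and every field is wrapped in a list.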
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Union[str, Any]=False ):
'''simple docstring'''
snake_case_ : Dict = _get_single_answer(__UpperCamelCase )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
snake_case_ : Any = example["""document"""]["""tokens"""]
snake_case_ : Optional[int] = []
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
return {
"context": " ".join(__UpperCamelCase ),
"answer": {
"start_token": -1_0_0, # ignore index in cross-entropy
"end_token": -1_0_0, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
    # later, helps in removing all no-answer samples
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
snake_case_ : Tuple = ["""start_token""", """end_token"""]
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
snake_case_ : Union[str, Any] = example["""document"""]["""tokens"""]
snake_case_ : Dict = answer["""start_token"""]
snake_case_ : Union[str, Any] = answer["""end_token"""]
snake_case_ : Tuple = []
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
snake_case_ : int = """ """.join(context[start_token:end_token] )
# checking above code
if assertion:
snake_case_ : List[Any] = doc["""is_html"""][answer["""start_token"""] : answer["""end_token"""]]
snake_case_ : Dict = doc["""token"""][answer["""start_token"""] : answer["""end_token"""]]
snake_case_ : Optional[Any] = """ """.join([old[i] for i in range(len(__UpperCamelCase ) ) if not is_html[i]] )
if new != old:
print("""ID:""" , example["""id"""] )
print("""New:""" , __UpperCamelCase , end="""\n""" )
print("""Old:""" , __UpperCamelCase , end="""\n\n""" )
return {
"context": " ".join(__UpperCamelCase ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
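# Index-shift sketch for the HTML filtering above (illustrative): with document
# tokens ["<p>", "hello", "world", "</p>"], is_html [True, False, False, True]
# and an answer span of tokens 1..3, dropping the two HTML tokens shifts
# start_token 1 -> 0 and end_token 3 -> 2, so the span still selects
# "hello world" in the cleaned context.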
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : Dict=2_0_4_8 , __UpperCamelCase : Tuple=4_0_9_6 , __UpperCamelCase : Optional[int]=True ):
'''simple docstring'''
snake_case_ : Tuple = get_context_and_ans(__UpperCamelCase , assertion=__UpperCamelCase )
snake_case_ : Optional[int] = out["""answer"""]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
snake_case_ : str = tokenizer(example["""question"""]["""text"""] , out["""context"""] ).input_ids
snake_case_ : Dict = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
snake_case_ : Tuple = []
snake_case_ : List[Any] = []
snake_case_ : Optional[Any] = input_ids[:q_len]
snake_case_ : List[Any] = range(__UpperCamelCase , len(__UpperCamelCase ) , max_length - doc_stride )
for i in doc_start_indices:
snake_case_ : Any = i + max_length - q_len
snake_case_ : List[str] = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer["""category"""][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-1_0_0] * len(__UpperCamelCase ),
"end_token": [-1_0_0] * len(__UpperCamelCase ),
"category": category,
},
}
snake_case_ : List[Any] = out["""context"""].split()
snake_case_ : Union[str, Any] = splitted_context[answer["""end_token"""]]
snake_case_ : str = len(
tokenizer(
""" """.join(splitted_context[: answer["""start_token"""]] ) , add_special_tokens=__UpperCamelCase , ).input_ids )
snake_case_ : Tuple = len(
tokenizer(""" """.join(splitted_context[: answer["""end_token"""]] ) , add_special_tokens=__UpperCamelCase ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
snake_case_ : str = len(tokenizer(__UpperCamelCase , add_special_tokens=__UpperCamelCase ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
snake_case_ : List[str] = input_ids[answer["""start_token"""] : answer["""end_token"""] + 1] # right & left are inclusive
snake_case_ : Optional[int] = answer["""start_token"""]
snake_case_ : List[str] = answer["""end_token"""]
if assertion:
snake_case_ : str = tokenizer.decode(__UpperCamelCase )
if answer["span"] != new:
print("""ISSUE IN TOKENIZATION""" )
print("""OLD:""" , answer["""span"""] )
print("""NEW:""" , __UpperCamelCase , end="""\n\n""" )
if len(__UpperCamelCase ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
snake_case_ : int = input_ids[:q_len]
snake_case_ : Tuple = range(__UpperCamelCase , len(__UpperCamelCase ) , max_length - doc_stride )
snake_case_ : Dict = []
snake_case_ : Optional[Any] = []
snake_case_ : Dict = []
snake_case_ : Dict = [] # null, yes, no, long, short
for i in doc_start_indices:
snake_case_ : List[Any] = i + max_length - q_len
snake_case_ : Tuple = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
snake_case_ : Any = start_token - i + q_len
snake_case_ : Tuple = end_token - i + q_len
answers_category.append(answer["""category"""][0] ) # ["short"] -> "short"
else:
snake_case_ : Union[str, Any] = -1_0_0
snake_case_ : Union[str, Any] = -1_0_0
answers_category.append("""null""" )
snake_case_ : List[str] = inputs[-1][start_token : end_token + 1]
answers_start_token.append(__UpperCamelCase )
answers_end_token.append(__UpperCamelCase )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print("""ISSUE in strided for ID:""" , example["""id"""] )
print("""New:""" , tokenizer.decode(__UpperCamelCase ) )
print("""Old:""" , tokenizer.decode(__UpperCamelCase ) , end="""\n\n""" )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : int=2_0_4_8 , __UpperCamelCase : Tuple=4_0_9_6 , __UpperCamelCase : Union[str, Any]=False ):
'''simple docstring'''
snake_case_ : int = get_strided_contexts_and_ans(
__UpperCamelCase , __UpperCamelCase , doc_stride=__UpperCamelCase , max_length=__UpperCamelCase , assertion=__UpperCamelCase , )
return example
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] ):
'''simple docstring'''
with jsonlines.open(__UpperCamelCase , """a""" ) as writer:
for example in tqdm(__UpperCamelCase , total=len(__UpperCamelCase ) , desc="""Saving samples ... """ ):
snake_case_ : int = example["""labels"""]
for ids, start, end, cat in zip(
example["""input_ids"""] , labels["""start_token"""] , labels["""end_token"""] , labels["""category"""] , ):
if start == -1 and end == -1:
                    continue  # skip samples whose answer is the (-1, -1) "no answer" sentinel
if cat == "null" and np.random.rand() < 0.6:
                    continue  # randomly drop ~60% of "null"-category samples (rand() < 0.6)
writer.write(
{
"""input_ids""": ids,
"""start_token""": start,
"""end_token""": end,
"""category""": CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
__lowerCAmelCase : Tuple = load_dataset('''natural_questions''')
__lowerCAmelCase : Optional[Any] = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
__lowerCAmelCase : Any = data['''train''' if PROCESS_TRAIN == '''true''' else '''validation''']
__lowerCAmelCase : Tuple = {
'''tokenizer''': tokenizer,
'''doc_stride''': DOC_STRIDE,
'''max_length''': MAX_LENGTH,
'''assertion''': False,
}
__lowerCAmelCase : Optional[int] = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
__lowerCAmelCase : int = data.remove_columns(['''annotations''', '''document''', '''id''', '''question'''])
print(data)
np.random.seed(SEED)
__lowerCAmelCase : str = '''nq-training.jsonl''' if PROCESS_TRAIN == '''true''' else '''nq-validation.jsonl'''
save_to_disk(data, file_name=cache_file_name)
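    # Window math sketch for the striding above (illustrative numbers): with
    # max_length = 4096, doc_stride = 2048 and a question of q_len = 16 ids,
    # each window holds max_length - q_len = 4080 document ids, consecutive
    # windows start max_length - doc_stride = 2048 ids apart, so they overlap
    # by doc_stride - q_len = 2032 ids; prepending the question keeps every
    # chunk at most max_length ids long.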
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=9_9 , _lowercase=3_2 , _lowercase=5 , _lowercase=4 , _lowercase=3_7 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=1_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=4 , ) -> List[str]:
'''simple docstring'''
snake_case_ : Tuple = parent
snake_case_ : List[str] = batch_size
snake_case_ : int = seq_length
snake_case_ : List[Any] = is_training
snake_case_ : Optional[int] = use_attention_mask
snake_case_ : Optional[Any] = use_token_type_ids
snake_case_ : Union[str, Any] = use_labels
snake_case_ : str = vocab_size
snake_case_ : List[str] = hidden_size
snake_case_ : List[str] = num_hidden_layers
snake_case_ : int = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : List[str] = max_position_embeddings
snake_case_ : Union[str, Any] = type_vocab_size
snake_case_ : str = type_sequence_label_size
snake_case_ : Dict = initializer_range
snake_case_ : str = num_choices
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Tuple = None
if self.use_attention_mask:
snake_case_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : Optional[int] = None
if self.use_token_type_ids:
snake_case_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Union[str, Any] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : str = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = config_and_inputs
snake_case_ : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = True
_lowerCamelCase = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = FlaxRoFormerModelTester(self )
@slow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case_ : Tuple = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=_lowercase )
snake_case_ : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
snake_case_ : Tuple = jnp.array([[0, 1, 2, 3, 4, 5]] )
snake_case_ : Dict = model(_lowercase )[0]
snake_case_ : Optional[int] = 5_0_0_0_0
snake_case_ : Union[str, Any] = (1, 6, vocab_size)
self.assertEqual(output.shape , _lowercase )
snake_case_ : Dict = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
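def _apply_rotary_sketch(x, positions):
    # Minimal numpy sketch of the rotary position embedding that RoFormer
    # applies to queries and keys; illustrative only, not the library
    # implementation. x: (seq_len, dim) array with even dim; positions:
    # (seq_len,) array of integer position indices.
    dim = x.shape[-1]
    inv_freq = 1.0 / (1_0_0_0_0 ** (np.arange(0, dim, 2) / dim))  # (dim / 2,)
    angles = positions[:, None] * inv_freq[None, :]  # (seq_len, dim / 2)
    sin, cos = np.sin(angles), np.cos(angles)
    xa, xb = x[:, 0::2], x[:, 1::2]  # even / odd feature pairs
    # rotate each (xa, xb) pair by its position-dependent angle
    return np.stack([xa * cos - xb * sin, xa * sin + xb * cos], axis=-1).reshape(x.shape)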
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int ):
'''simple docstring'''
return (
num != den and num % 1_0 == den // 1_0 and (num // 1_0) / (den % 1_0) == num / den
)
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : str = []
snake_case_ : Optional[Any] = 1_1
snake_case_ : Any = int("""1""" + """0""" * digit_len )
for num in range(__UpperCamelCase , __UpperCamelCase ):
while den <= 9_9:
if (num != den) and (num % 1_0 == den // 1_0) and (den % 1_0 != 0):
if is_digit_cancelling(__UpperCamelCase , __UpperCamelCase ):
solutions.append(F'{num}/{den}' )
den += 1
num += 1
snake_case_ : Optional[Any] = 1_0
return solutions
def __lowerCAmelCase ( __UpperCamelCase : int = 2 ):
'''simple docstring'''
snake_case_ : Dict = 1.0
for fraction in fraction_list(__UpperCamelCase ):
snake_case_ : Dict = Fraction(__UpperCamelCase )
result *= frac.denominator / frac.numerator
return int(__UpperCamelCase )
if __name__ == "__main__":
print(solution())
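    # Worked check (Project Euler 33): the four non-trivial fractions found are
    # 16/64, 19/95, 26/65 and 49/98 (e.g. 49/98 = 4/8 after "cancelling" the
    # shared digit 9); their product reduces to 1/100, so solution() prints 100.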
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : int = 1_0
snake_case_ : Any = datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
snake_case_ : Tuple = datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [9_7], """text""": ["""1976"""]}] * 1_0,
"""id""": list(range(__UpperCamelCase ) ),
} , features=__UpperCamelCase , )
return dataset
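# Shape of the rows built above (illustrative): each of the 10 examples looks
# like {"tokens": ["foo"] * 5, "labels": [1, 1, 1, 1, 1],
#       "answers": {"text": ["1976"], "answer_start": [97]}, "id": 0}
# with "labels" values indexing the ["negative", "positive"] class names.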
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : str = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=__UpperCamelCase )
return filename
# FILE_CONTENT + files
__lowerCAmelCase : List[Any] = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
snake_case_ : Optional[Any] = FILE_CONTENT
with open(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase )
return filename
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
import bza
snake_case_ : Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
snake_case_ : Any = bytes(__UpperCamelCase , """utf-8""" )
with bza.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
import gzip
snake_case_ : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
snake_case_ : List[Any] = bytes(__UpperCamelCase , """utf-8""" )
with gzip.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
import lza.frame
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
snake_case_ : Optional[Any] = bytes(__UpperCamelCase , """utf-8""" )
with lza.frame.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
with pyazr.SevenZipFile(__UpperCamelCase , """w""" ) as archive:
archive.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple ):
'''simple docstring'''
import tarfile
snake_case_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
import lzma
snake_case_ : str = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
snake_case_ : str = bytes(__UpperCamelCase , """utf-8""" )
with lzma.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
import zipfile
snake_case_ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
snake_case_ : Tuple = bytes(__UpperCamelCase , """utf-8""" )
with zstd.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Dict = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
snake_case_ : List[str] = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase )
return filename
__lowerCAmelCase : List[str] = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
__lowerCAmelCase : Tuple = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
__lowerCAmelCase : int = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
__lowerCAmelCase : int = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
__lowerCAmelCase : Any = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Optional[int] = datasets.Dataset.from_dict(__UpperCamelCase )
snake_case_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
with contextlib.closing(sqlitea.connect(__UpperCamelCase ) ) as con:
snake_case_ : Tuple = con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(__UpperCamelCase , """w""" , newline="""""" ) as f:
snake_case_ : Optional[Any] = csv.DictWriter(__UpperCamelCase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(__UpperCamelCase , """w""" , newline="""""" ) as f:
snake_case_ : str = csv.DictWriter(__UpperCamelCase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : int ):
'''simple docstring'''
import bza
snake_case_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(__UpperCamelCase , """rb""" ) as f:
snake_case_ : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(__UpperCamelCase , """wb""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(__UpperCamelCase , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Any = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
snake_case_ : Any = pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(__UpperCamelCase , """wb""" ) as f:
snake_case_ : Optional[int] = pq.ParquetWriter(__UpperCamelCase , schema=__UpperCamelCase )
snake_case_ : Optional[int] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__UpperCamelCase ) )] for k in DATA[0]} , schema=__UpperCamelCase )
writer.write_table(__UpperCamelCase )
writer.close()
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
snake_case_ : Any = {"""data""": DATA}
with open(__UpperCamelCase , """w""" ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
snake_case_ : List[Any] = {"""data""": DATA_DICT_OF_LISTS}
with open(__UpperCamelCase , """w""" ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(__UpperCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
import gzip
snake_case_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(__UpperCamelCase , """rb""" ) as orig_file:
with gzip.open(__UpperCamelCase , """wb""" ) as zipped_file:
zipped_file.writelines(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : List[str] ):
'''simple docstring'''
import gzip
snake_case_ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(__UpperCamelCase , """rb""" ) as orig_file:
with gzip.open(__UpperCamelCase , """wb""" ) as zipped_file:
zipped_file.writelines(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : List[str] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""nested""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.add(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(__UpperCamelCase , """w""" ) as f:
f.add(__UpperCamelCase , arcname=os.path.join("""nested""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : str = ["""0""", """1""", """2""", """3"""]
snake_case_ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : int = ["""0""", """1""", """2""", """3"""]
snake_case_ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(__UpperCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : List[Any] = ["""0""", """1""", """2""", """3"""]
snake_case_ : str = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(__UpperCamelCase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
snake_case_ : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
f.write(__UpperCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(__UpperCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] ):
'''simple docstring'''
snake_case_ : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(__UpperCamelCase , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : List[Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
snake_case_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( ):
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(__UpperCamelCase , """w""" ) as f:
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ) )
f.write(__UpperCamelCase , arcname=os.path.basename(__UpperCamelCase ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
return data_dir
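# Hedged usage sketch (it would live in a test module, not in this conftest):
# pytest injects the session-scoped fixtures above by parameter name; in the
# original datasets conftest the CSV fixture is named `csv_path`, so a test
# can take it directly:
#   def test_csv_has_header(csv_path):
#       with open(csv_path, newline="") as f:
#           assert f.readline().strip() == "col_1,col_2,col_3"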
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
# TODO Update this
__lowerCAmelCase : List[Any] = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = '''esm'''
def __init__( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=7_6_8 , _lowercase=1_2 , _lowercase=1_2 , _lowercase=3_0_7_2 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=1_0_2_6 , _lowercase=0.02 , _lowercase=1E-12 , _lowercase="absolute" , _lowercase=True , _lowercase=None , _lowercase=False , _lowercase=False , _lowercase=None , _lowercase=None , **_lowercase , ) -> str:
'''simple docstring'''
super().__init__(pad_token_id=_lowercase , mask_token_id=_lowercase , **_lowercase )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")
    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
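# Illustrative usage sketch (added, not part of the original file; the numeric values
# are hypothetical):
#
#     config = EsmConfig(vocab_size=33, is_folding_model=True,
#                        esmfold_config={"trunk": {"num_blocks": 4}})
#     config.to_dict()["esmfold_config"]["trunk"]["num_blocks"]  # -> 4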
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 721 |
"""simple docstring"""
def binary_xor(a: int, b: int) -> str:
    """Return the bitwise XOR of two non-negative integers as a '0b'-prefixed binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be non-negative")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
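# Worked example (added for illustration): 25 is 0b11001 and 32 is 0b100000.
# Zero-filled to the longer width these become 011001 and 100000, and XOR-ing
# column by column yields binary_xor(25, 32) == "0b111001", i.e. 57.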
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"
    def __init__(
        self,
        num_channels: int = 3, image_size: int = 600,
        width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish", hidden_dim: int = 2560, pooling_type: str = "mean",
        initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5, drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
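    # Added note: with the default block repeats [1, 2, 2, 3, 3, 4, 1], the derived
    # layer count above is sum(...) * 4 = 16 * 4 = 64.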
class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
| 22 |
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13, seq_length=7, is_training=True,
        use_input_lengths=True, use_token_type_ids=True, use_labels=True,
        gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False,
        n_langs=2, vocab_size=99, n_special=0, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4,
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_sequence_label_size=2, initializer_range=0.02,
        num_labels=2, num_choices=4, summary_type="last", use_proj=True,
        scope=None, bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def get_config(self):
return XLMConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,)
    def create_and_check_xlm_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_xlm_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_xlm_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_xlm_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_xlm_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_xlm_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_xlm_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
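    # Note (added for clarity, not in the original test): the QA model expects
    # `start_positions`/`end_positions` label tensors, so dummy all-zero labels are
    # injected above whenever the common tests request labelled inputs.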
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )
    def _check_hidden_states_for_generate(self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
@slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 22 | 1 |
"""simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)
    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] ,)
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] ,)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
] ,)
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [3_5389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
# fmt: off
A = {'input_ids': [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=A,
            model_name="microsoft/xprophetnet-large-wiki100-cased",
            revision="1acad1643ddd54a44df6a1b797ada8373685d90e",
        )
| 22 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits(x, bits=BITS):
    """Expects an image tensor in [0, 1]; outputs a bit tensor in [-1, 1]."""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits
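# Added note: for an input of shape (batch, 3, H, W) in [0, 1], the function above
# quantizes to 8-bit integers and unpacks each channel into 8 sign bits, so the output
# has shape (batch, 24, H, W) with values in {-1.0, +1.0}.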
def bits_to_decimal(x, bits=BITS):
    """Expects a bit tensor in [-1, 1]; outputs an image tensor in [0, 1]."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
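# Added note: bits_to_decimal is the (lossy) inverse of decimal_to_bits, i.e.
# bits_to_decimal(decimal_to_bits(x)) recovers x up to 8-bit quantization error.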
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
):
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>)
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
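# Added note: with eta == 0 the noise branch above is skipped and the update reduces to
# the deterministic DDIM step, x_{t-1} = sqrt(ᾱ_{t-1}) · x̂_0 + sqrt(1 − ᾱ_{t-1}) · ε_θ.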
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
):
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
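# Added note: step 5 above is the DDPM posterior mean,
# µ_t = (sqrt(ᾱ_{t-1}) β_t / (1 − ᾱ_t)) · x̂_0 + (sqrt(α_t) (1 − ᾱ_{t-1}) / (1 − ᾱ_t)) · x_t.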
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # route the matching bit-space step function to the scheduler
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )

        self.register_modules(unet=unet, scheduler=scheduler)
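    # Illustrative usage sketch (added, not part of the original pipeline; the model
    # sizes below are hypothetical):
    #
    #     unet = UNet2DConditionModel(sample_size=64, in_channels=3 * BITS, out_channels=3 * BITS)
    #     pipe = BitDiffusion(unet, DDIMScheduler(), bit_scale=1.0)
    #     image = pipe(height=64, width=64, num_inference_steps=50).images[0]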
    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 22 | 1 |
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)


DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
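# Illustrative usage sketch (added; the dataset variables are hypothetical):
#
#     mixed = interleave_datasets([ds_en, ds_fr], probabilities=[0.8, 0.2], seed=42)
#
# With "first_exhausted" (the default) iteration stops as soon as one source runs out,
# while "all_exhausted" oversamples the shorter sources until every dataset is seen.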
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
| 22 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"
    def __init__(
        self,
        hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
        initializer_range=0.02, layer_norm_eps=1e-12,
        image_size=[512, 864], patch_size=16, num_channels=3, qkv_bias=True,
        num_detection_tokens=100, use_mid_position_embeddings=True, auxiliary_loss=False,
        class_cost=1, bbox_cost=5, giou_cost=2,
        bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
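    # Added note: `class_cost`, `bbox_cost` and `giou_cost` weight the terms of the
    # Hungarian matching cost that pairs predicted detection tokens with ground-truth
    # boxes, while the `*_coefficient` values weight the corresponding loss terms.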
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 22 | 1 |
"""simple docstring"""
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
):
    """
    Power iteration: repeatedly multiply the matrix by a candidate eigenvector and
    normalize, converging to the eigenvalue of largest magnitude and its eigenvector.
    """
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector
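# Added note: convergence of power iteration is geometric with ratio |λ₂ / λ₁| (the
# second-largest eigenvalue magnitude over the largest), so a well-separated dominant
# eigenvalue converges in few iterations.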
def test_power_iteration():
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
    test_power_iteration()
| 22 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
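    # Added note: each test below loads the tiny ONNX checkpoint, swaps in a different
    # scheduler, and compares a 3x3 corner slice of the generated image against
    # reference values — a cheap fingerprint of the full output.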
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
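    # Added note (assumption): disabling ONNX Runtime's memory-pattern optimization is
    # a common workaround with the CUDA execution provider to avoid large arena
    # pre-allocations, keeping these tests within the configured 15GB limit.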
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 22 | 1 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
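# Added note: the mapping above follows the Mesh-TensorFlow convention that dense
# kernels are stored transposed relative to PyTorch `nn.Linear`, hence the repeated
# `transpose([1, 0])` before each weight is wrapped in `torch.tensor`.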
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
| 22 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


# NOTE: the original model-specific class name is not recoverable from this excerpt;
# "MobileNetV2ImageProcessor" is an assumption based on the default sizes and the
# IMAGENET_STANDARD normalization constants used below.
class MobileNetV2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : PILImageResampling = PILImageResampling.BICUBIC ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray:
A = get_size_dict(A_ ,default_to_square=A_ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(A_ ,size=size['shortest_edge'] ,default_to_square=A_ )
return resize(A_ ,size=A_ ,resample=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray:
A = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(A_ ,size=(size['height'], size['width']) ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : np.ndarray ,A_ : float ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : List[str] ) -> np.ndarray:
return rescale(A_ ,scale=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : np.ndarray ,A_ : Union[float, List[float]] ,A_ : Union[float, List[float]] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : Any ,) -> np.ndarray:
return normalize(A_ ,mean=A_ ,std=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : ImageInput ,A_ : Optional[bool] = None ,A_ : Dict[str, int] = None ,A_ : PILImageResampling = None ,A_ : bool = None ,A_ : Dict[str, int] = None ,A_ : Optional[bool] = None ,A_ : Optional[float] = None ,A_ : Optional[bool] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[str, TensorType]] = None ,A_ : Union[str, ChannelDimension] = ChannelDimension.FIRST ,**A_ : Tuple ,) -> List[Any]:
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(A_ ,default_to_square=A_ )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(A_ ,param_name='crop_size' )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
A = [to_numpy_array(A_ ) for image in images]
if do_resize:
A = [self.resize(image=A_ ,size=A_ ,resample=A_ ) for image in images]
if do_center_crop:
A = [self.center_crop(image=A_ ,size=A_ ) for image in images]
if do_rescale:
A = [self.rescale(image=A_ ,scale=A_ ) for image in images]
if do_normalize:
A = [self.normalize(image=A_ ,mean=A_ ,std=A_ ) for image in images]
A = [to_channel_dimension_format(A_ ,A_ ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=A_ ,tensor_type=A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : List[Tuple] = None ) -> str:
A = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A_ ) != len(A_ ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(A_ ):
A = target_sizes.numpy()
A = []
for idx in range(len(A_ ) ):
A = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode='bilinear' ,align_corners=A_ )
A = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(A_ )
else:
A = logits.argmax(dim=1 )
A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation | 22 | 1 |
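A condensed numpy sketch of the preprocessing order the class above implements (resize elided; center-crop, rescale, normalize follow in that order). Sizes and the mean/std values are illustrative, not the class defaults.

import numpy as np

image = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)   # HWC uint8 input

# center-crop to 224x224
h, w = image.shape[:2]
top, left = (h - 224) // 2, (w - 224) // 2
crop = image[top : top + 224, left : left + 224]

# rescale to [0, 1] with factor 1/255, then normalize per channel
mean, std = np.array([0.5, 0.5, 0.5]), np.array([0.5, 0.5, 0.5])
pixel_values = (crop.astype(np.float32) / 255.0 - mean) / std
print(pixel_values.shape)   # (224, 224, 3); transpose to CHW for ChannelDimension.FIRST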
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
_lowercase = ['''small''', '''medium''', '''large''']
_lowercase = '''lm_head.decoder.weight'''
_lowercase = '''lm_head.weight'''
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = torch.load(snake_case__ )
A = d.pop(snake_case__ )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
torch.save(snake_case__ , os.path.join(snake_case__ , snake_case__ ) )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
_lowercase = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
_lowercase = os.path.join(args.dialogpt_path, F"""{MODEL}_ft.pkl""")
_lowercase = F"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
) | 22 |
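The converter above only renames a single key; the same operation on an in-memory state dict, as a stand-alone sketch (tensor shapes are illustrative):

import torch

state_dict = {"lm_head.decoder.weight": torch.zeros(4, 4), "other.weight": torch.ones(2)}
state_dict["lm_head.weight"] = state_dict.pop("lm_head.decoder.weight")
print(sorted(state_dict))   # ['lm_head.weight', 'other.weight']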
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
_lowercase = data_utils.TransfoXLTokenizer
_lowercase = data_utils.TransfoXLCorpus
_lowercase = data_utils
_lowercase = data_utils
def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : int ):
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(snake_case__ , 'rb' ) as fp:
A = pickle.load(snake_case__ , encoding='latin1' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
A = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
print(F'Save vocabulary to {pytorch_vocab_dump_path}' )
A = corpus.vocab.__dict__
torch.save(snake_case__ , snake_case__ )
A = corpus.__dict__
corpus_dict_no_vocab.pop('vocab' , snake_case__ )
A = pytorch_dump_folder_path + '/' + CORPUS_NAME
print(F'Save dataset to {pytorch_dataset_dump_path}' )
torch.save(snake_case__ , snake_case__ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
A = os.path.abspath(snake_case__ )
A = os.path.abspath(snake_case__ )
print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
A = TransfoXLConfig()
else:
A = TransfoXLConfig.from_json_file(snake_case__ )
print(F'Building PyTorch model from configuration: {config}' )
A = TransfoXLLMHeadModel(snake_case__ )
A = load_tf_weights_in_transfo_xl(snake_case__ , snake_case__ , snake_case__ )
# Save pytorch-model
A = os.path.join(snake_case__ , snake_case__ )
A = os.path.join(snake_case__ , snake_case__ )
print(F'Save PyTorch model to {os.path.abspath(snake_case__ )}' )
torch.save(model.state_dict() , snake_case__ )
print(F'Save configuration file to {os.path.abspath(snake_case__ )}' )
with open(snake_case__ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
_lowercase = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
) | 22 | 1 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Dict = '''MCTCTFeatureExtractor'''
_lowerCamelCase: Optional[int] = '''AutoTokenizer'''
def __init__( self : str ,A_ : Tuple ,A_ : int ) -> List[str]:
super().__init__(A_ ,A_ )
A = self.feature_extractor
A = False
def __call__( self : Any ,*A_ : int ,**A_ : str ) -> Union[str, Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*A_ ,**A_ )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
A = kwargs.pop('raw_speech' )
else:
A = kwargs.pop('audio' ,A_ )
A = kwargs.pop('sampling_rate' ,A_ )
A = kwargs.pop('text' ,A_ )
if len(A_ ) > 0:
A = args[0]
A = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
A = self.feature_extractor(A_ ,*A_ ,sampling_rate=A_ ,**A_ )
if text is not None:
A = self.tokenizer(A_ ,**A_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
A = encodings['input_ids']
return inputs
def _SCREAMING_SNAKE_CASE ( self : Tuple ,*A_ : List[str] ,**A_ : Union[str, Any] ) -> int:
return self.tokenizer.batch_decode(*A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,*A_ : Union[str, Any] ,**A_ : List[Any] ) -> str:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*A_ ,**A_ )
A = kwargs.pop('input_features' ,A_ )
A = kwargs.pop('labels' ,A_ )
if len(A_ ) > 0:
A = args[0]
A = args[1:]
if input_features is not None:
A = self.feature_extractor.pad(A_ ,*A_ ,**A_ )
if labels is not None:
A = self.tokenizer.pad(A_ ,**A_ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
A = labels['input_ids']
return input_features
def _SCREAMING_SNAKE_CASE ( self : Tuple ,*A_ : Dict ,**A_ : List[str] ) -> List[str]:
return self.tokenizer.decode(*A_ ,**A_ )
@contextmanager
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.)' )
A = True
A = self.tokenizer
yield
A = self.feature_extractor
A = False | 22 |
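The deprecation warning above spells out the replacement pattern: pass `text=` alongside `audio=` in one call instead of wrapping tokenization in `as_target_processor`. A hedged sketch, with processor construction elided:

# deprecated pattern
# with processor.as_target_processor():
#     labels = processor(text_batch).input_ids

# recommended pattern, matching the __call__ above: one call handles both
# modalities, and the tokenized text ids land on the returned features
# inputs = processor(audio=speech_batch, sampling_rate=16_000, text=text_batch)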
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Dict ) -> int:
A = {}
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : Optional[Any]=1 ) -> int:
if self.graph.get(A_ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
A = [[w, v]]
if not self.graph.get(A_ ):
A = []
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
return list(self.graph )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Union[str, Any] ,A_ : Dict ) -> Optional[Any]:
if self.graph.get(A_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : int=-2 ,A_ : Dict=-1 ) -> str:
if s == d:
return []
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(A_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return visited
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Any=-1 ) -> int:
if c == -1:
A = floor(random() * 1_0000 ) + 10
for i in range(A_ ):
            # every vertex has up to ~100 outgoing edges
for _ in range(floor(random() * 102 ) + 1 ):
A = floor(random() * c ) + 1
if n != i:
self.add_pair(A_ ,A_ ,1 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Union[str, Any]=-2 ) -> Optional[Any]:
A = deque()
A = []
if s == -2:
A = list(self.graph )[0]
d.append(A_ )
visited.append(A_ )
while d:
A = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Tuple ) -> Any:
A = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Union[str, Any] ) -> str:
return len(self.graph[u] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any]=-2 ) -> Any:
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
A = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return sorted_nodes
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return list(A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return False
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple=-2 ,A_ : List[str]=-1 ) -> str:
A = time()
self.dfs(A_ ,A_ )
A = time()
return end - begin
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any]=-2 ) -> Dict:
A = time()
self.bfs(A_ )
A = time()
return end - begin
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[Any] ) -> Tuple:
A = {}
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[str]=1 ) -> Dict:
        # check if vertex u exists
if self.graph.get(A_ ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
A = [[w, v]]
# add the other way
if self.graph.get(A_ ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
A = [[w, u]]
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : List[str] ) -> List[Any]:
if self.graph.get(A_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(A_ )
# the other way round
if self.graph.get(A_ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[str]=-2 ,A_ : List[Any]=-1 ) -> int:
if s == d:
return []
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(A_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int]=-1 ) -> List[Any]:
if c == -1:
A = floor(random() * 1_0000 ) + 10
for i in range(A_ ):
            # every vertex has up to ~100 outgoing edges
for _ in range(floor(random() * 102 ) + 1 ):
A = floor(random() * c ) + 1
if n != i:
self.add_pair(A_ ,A_ ,1 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict=-2 ) -> List[Any]:
A = deque()
A = []
if s == -2:
A = list(self.graph )[0]
d.append(A_ )
visited.append(A_ )
while d:
A = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[Any] ) -> List[Any]:
return len(self.graph[u] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return list(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return False
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
return list(self.graph )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any]=-2 ,A_ : List[str]=-1 ) -> Any:
A = time()
self.dfs(A_ ,A_ )
A = time()
return end - begin
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any]=-2 ) -> Union[str, Any]:
A = time()
self.bfs(A_ )
A = time()
return end - begin | 22 | 1 |
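The generated method names above all collide on `_SCREAMING_SNAKE_CASE`, so the classes are not directly callable; a minimal stand-alone sketch of the iterative DFS they implement, over the same adjacency format of [weight, vertex] pairs:

def dfs(graph, start):
    # graph: dict mapping vertex -> list of [weight, vertex] pairs
    stack, visited = [start], [start]
    while stack:
        s = stack[-1]
        for _w, nxt in graph.get(s, []):
            if nxt not in visited:
                stack.append(nxt)
                visited.append(nxt)
                break
        else:                      # every child visited: backtrack
            stack.pop()
    return visited

print(dfs({0: [[1, 1], [1, 2]], 1: [[1, 0]], 2: [[1, 0]]}, 0))   # [0, 1, 2]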
"""simple docstring"""
def _snake_case ( snake_case__ : list[list[int]] , snake_case__ : int , snake_case__ : int , snake_case__ : set ):
A , A = len(snake_case__ ), len(grid[0] )
if (
min(snake_case__ , snake_case__ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
A = 0
count += depth_first_search(snake_case__ , row + 1 , snake_case__ , snake_case__ )
count += depth_first_search(snake_case__ , row - 1 , snake_case__ , snake_case__ )
count += depth_first_search(snake_case__ , snake_case__ , col + 1 , snake_case__ )
count += depth_first_search(snake_case__ , snake_case__ , col - 1 , snake_case__ )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 22 |
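The record's recursive calls use the original name `depth_first_search`, which no longer matches the generated `def` name; a self-contained equivalent plus a worked case. On a 3x3 grid with a blocked center, the open cells form a ring, so exactly two simple paths connect the corners:

def count_paths(grid, row, col, visit):
    rows, cols = len(grid), len(grid[0])
    if not (0 <= row < rows and 0 <= col < cols) or (row, col) in visit or grid[row][col] == 1:
        return 0
    if (row, col) == (rows - 1, cols - 1):
        return 1
    visit.add((row, col))
    total = sum(
        count_paths(grid, row + dr, col + dc, visit)
        for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1))
    )
    visit.remove((row, col))
    return total

grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]   # 1 marks the blocked center cell
print(count_paths(grid, 0, 0, set()))       # 2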
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def _snake_case ( snake_case__ : str = "isbn/0140328726" ):
A = olid.strip().strip('/' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('/' ) != 1:
A = F'{olid} is not a valid Open Library olid'
raise ValueError(snake_case__ )
return requests.get(F'https://openlibrary.org/{new_olid}.json' ).json()
def _snake_case ( snake_case__ : dict ):
A = {
'title': 'Title',
'publish_date': 'Publish date',
'authors': 'Authors',
'number_of_pages': 'Number of pages:',
'first_sentence': 'First sentence',
'isbn_10': 'ISBN (10)',
'isbn_13': 'ISBN (13)',
}
A = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
A = [
get_openlibrary_data(author['key'] )['name'] for author in data['Authors']
]
A = data['First sentence']['value']
for key, value in data.items():
if isinstance(snake_case__ , snake_case__ ):
A = ', '.join(snake_case__ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
_lowercase = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
_lowercase = summarize_book(get_openlibrary_data(F"""isbn/{isbn}"""))
print('''\n'''.join(F"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"""Sorry, there are no results for ISBN: {isbn}.""") | 22 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Any = '''megatron-bert'''
def __init__( self : Dict ,A_ : Optional[int]=2_9056 ,A_ : Tuple=1024 ,A_ : Optional[Any]=24 ,A_ : List[str]=16 ,A_ : Optional[Any]=4096 ,A_ : str="gelu" ,A_ : List[str]=0.1 ,A_ : Tuple=0.1 ,A_ : int=512 ,A_ : Optional[Any]=2 ,A_ : str=0.02 ,A_ : Optional[Any]=1e-12 ,A_ : Tuple=0 ,A_ : Tuple="absolute" ,A_ : str=True ,**A_ : int ,) -> Optional[Any]:
super().__init__(pad_token_id=A_ ,**A_ )
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = hidden_act
A = intermediate_size
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = initializer_range
A = layer_norm_eps
A = position_embedding_type
A = use_cache | 22 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_lowercase = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''PerceiverFeatureExtractor''']
_lowercase = ['''PerceiverImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 22 | 1 |
"""simple docstring"""
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] )
def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ):
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , snake_case__ )
A = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
A = dataset_size < in_memory_max_size
else:
A = False
A = is_small_dataset(snake_case__ )
assert result == expected | 22 |
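The rule the test above encodes, reduced to a pure function (a sketch, not the library's exact implementation): a dataset counts as small only when both its size and a positive in-memory cap are known and the size is under the cap.

def is_small(dataset_size, in_memory_max_size):
    if dataset_size and in_memory_max_size:
        return dataset_size < in_memory_max_size
    return False

assert is_small(400 * 2**20, 900 * 2**20) is True
assert is_small(600 * 2**20, 100 * 2**20) is False
assert is_small(None, 100 * 2**20) is False    # unknown size never counts as small
assert is_small(400 * 2**20, 0) is False       # a cap of 0 disables in-memory loading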
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def _snake_case ( snake_case__ : int ):
A = SwinvaConfig()
A = swinva_name.split('_' )
A = name_split[1]
if "to" in name_split[3]:
A = int(name_split[3][-3:] )
else:
A = int(name_split[3] )
if "to" in name_split[2]:
A = int(name_split[2][-2:] )
else:
A = int(name_split[2][6:] )
if model_size == "tiny":
A = 96
A = (2, 2, 6, 2)
A = (3, 6, 12, 24)
elif model_size == "small":
A = 96
A = (2, 2, 18, 2)
A = (3, 6, 12, 24)
elif model_size == "base":
A = 128
A = (2, 2, 18, 2)
A = (4, 8, 16, 32)
else:
A = 192
A = (2, 2, 18, 2)
A = (6, 12, 24, 48)
if "to" in swinva_name:
A = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
A = 2_1841
A = 'huggingface/label-files'
A = 'imagenet-22k-id2label.json'
A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) )
A = {int(snake_case__ ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
else:
A = 1000
A = 'huggingface/label-files'
A = 'imagenet-1k-id2label.json'
A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) )
A = {int(snake_case__ ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
A = img_size
A = num_classes
A = embed_dim
A = depths
A = num_heads
A = window_size
return config
def _snake_case ( snake_case__ : List[Any] ):
if "patch_embed.proj" in name:
A = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
A = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
A = 'encoder.' + name
if "attn.proj" in name:
A = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
A = name.replace('attn' , 'attention.self' )
if "norm1" in name:
A = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
A = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
A = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
A = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
A = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
A = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
A = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
A = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if name == "norm.weight":
A = 'layernorm.weight'
if name == "norm.bias":
A = 'layernorm.bias'
if "head" in name:
A = name.replace('head' , 'classifier' )
else:
A = 'swinv2.' + name
return name
def _snake_case ( snake_case__ : List[Any] , snake_case__ : List[Any] ):
for key in orig_state_dict.copy().keys():
A = orig_state_dict.pop(snake_case__ )
if "mask" in key:
continue
elif "qkv" in key:
A = key.split('.' )
A = int(key_split[1] )
A = int(key_split[3] )
A = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
A = val[:dim, :]
A = val[dim : dim * 2, :]
A = val[-dim:, :]
else:
A = val[:dim]
A = val[
dim : dim * 2
]
A = val[-dim:]
else:
A = val
return orig_state_dict
def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Tuple ):
A = timm.create_model(snake_case__ , pretrained=snake_case__ )
timm_model.eval()
A = get_swinva_config(snake_case__ )
A = SwinvaForImageClassification(snake_case__ )
model.eval()
A = convert_state_dict(timm_model.state_dict() , snake_case__ )
model.load_state_dict(snake_case__ )
A = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swinva_name.replace('_' , '-' ) ) )
A = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
A = image_processor(images=snake_case__ , return_tensors='pt' )
A = timm_model(inputs['pixel_values'] )
A = model(**snake_case__ ).logits
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
print(F'Saving model {swinva_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case__ )
model.push_to_hub(
repo_path_or_name=Path(snake_case__ , snake_case__ ) , organization='nandwalritik' , commit_message='Add model' , )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowercase = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path) | 22 | 1 |
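A numpy sketch (hypothetical `dim`) of the fused-qkv split performed in `convert_state_dict` above: timm stacks query, key and value along the first axis of one matrix, and the three slices become separate HF weights.

import numpy as np

dim = 6                                   # hypothetical all_head_size
qkv = np.arange(3 * dim * dim).reshape(3 * dim, dim)

query, key, value = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
assert query.shape == key.shape == value.shape == (dim, dim)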
"""simple docstring"""
import qiskit
def _snake_case ( snake_case__ : int , snake_case__ : int ):
A = qiskit.Aer.get_backend('aer_simulator' )
A = qiskit.QuantumCircuit(4 , 2 )
# encode inputs in qubits 0 and 1
if bita == 1:
qc_ha.x(0 )
if bita == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
A = qiskit.execute(snake_case__ , snake_case__ , shots=1000 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(snake_case__ )
if __name__ == "__main__":
_lowercase = half_adder(1, 1)
print(F"""Half Adder Output Qubit Counts: {counts}""") | 22 |
"""simple docstring"""
from math import pi, sqrt
def _snake_case ( snake_case__ : float ):
if num <= 0:
raise ValueError('math domain error' )
if num > 171.5:
raise OverflowError('math range error' )
elif num - int(snake_case__ ) not in (0, 0.5):
raise NotImplementedError('num must be an integer or a half-integer' )
elif num == 0.5:
        return sqrt(pi )  # gamma(0.5) == sqrt(pi)
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def _snake_case ( ):
    assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowercase = 1.0
while num:
_lowercase = float(input('''Gamma of: '''))
print(F"""gamma({num}) = {gamma(num)}""")
print('''\nEnter 0 to exit...''') | 22 | 1 |
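Worked values for the function above, assuming it keeps its original name `gamma` as its own recursion and the interactive loop do: positive integers reproduce the shifted factorial, and half-integers build down to sqrt(pi).

from math import isclose, pi, sqrt

assert gamma(5) == 4 * 3 * 2 * 1                        # gamma(n) == (n - 1)!
assert isclose(gamma(3.5), 2.5 * 1.5 * 0.5 * sqrt(pi))  # half-integer case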
"""simple docstring"""
import sys
from collections import defaultdict
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] ) -> int:
A = []
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : int ) -> Optional[int]:
return self.node_position[vertex]
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[Any] ,A_ : Any ) -> List[Any]:
A = pos
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : str ,A_ : Dict ,A_ : List[str] ) -> str:
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
A = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
A = 2 * start + 1
else:
A = 2 * start + 2
if heap[smallest_child] < heap[start]:
A , A = heap[smallest_child], positions[smallest_child]
A , A = (
heap[start],
positions[start],
)
A , A = temp, tempa
A = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] ,self.get_position(positions[start] ) )
self.set_position(positions[start] ,A_ )
self.top_to_bottom(A_ ,A_ ,A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Optional[int] ,A_ : Dict ,A_ : str ,A_ : Union[str, Any] ) -> Dict:
A = position[index]
while index != 0:
A = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
A = heap[parent]
A = position[parent]
self.set_position(position[parent] ,A_ )
else:
A = val
A = temp
self.set_position(A_ ,A_ )
break
A = parent
else:
A = val
A = temp
self.set_position(A_ ,0 )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple ,A_ : Dict ) -> Union[str, Any]:
A = len(A_ ) // 2 - 1
for i in range(A_ ,-1 ,-1 ):
self.top_to_bottom(A_ ,A_ ,len(A_ ) ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ,A_ : Dict ) -> Union[str, Any]:
A = positions[0]
A = sys.maxsize
self.top_to_bottom(A_ ,0 ,len(A_ ) ,A_ )
return temp
def _snake_case ( snake_case__ : Dict ):
A = Heap()
A = [0] * len(snake_case__ )
A = [-1] * len(snake_case__ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
A = [] # Heap of Distance of vertices from their neighboring vertex
A = []
for vertex in range(len(snake_case__ ) ):
distance_tv.append(sys.maxsize )
positions.append(snake_case__ )
heap.node_position.append(snake_case__ )
A = []
A = 1
A = sys.maxsize
for neighbor, distance in adjacency_list[0]:
A = 0
A = distance
heap.heapify(snake_case__ , snake_case__ )
for _ in range(1 , len(snake_case__ ) ):
A = heap.delete_minimum(snake_case__ , snake_case__ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
A = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(snake_case__ )]
):
A = distance
heap.bottom_to_top(
snake_case__ , heap.get_position(snake_case__ ) , snake_case__ , snake_case__ )
A = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
_lowercase = int(input('''Enter number of edges: ''').strip())
_lowercase = defaultdict(list)
for _ in range(edges_number):
_lowercase = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list)) | 22 |
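A small worked input for `prisms_algorithm` above (assuming the original method names survive on the heap class; the generated names collide). On a weighted triangle, the minimum spanning tree keeps the two cheapest edges:

from collections import defaultdict

adjacency = defaultdict(list)
for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
    adjacency[u].append([v, w])
    adjacency[v].append([u, w])

print(prisms_algorithm(adjacency))   # expected: [(0, 1), (1, 2)]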
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: torch.FloatTensor
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] ,A_ : Dict=3 ,A_ : int=3 ,A_ : str=("DownEncoderBlock2D",) ,A_ : Dict=(64,) ,A_ : str=2 ,A_ : Union[str, Any]=32 ,A_ : Optional[int]="silu" ,A_ : str=True ,) -> Union[str, Any]:
super().__init__()
A = layers_per_block
A = torch.nn.Convad(
A_ ,block_out_channels[0] ,kernel_size=3 ,stride=1 ,padding=1 ,)
A = None
A = nn.ModuleList([] )
# down
A = block_out_channels[0]
for i, down_block_type in enumerate(A_ ):
A = output_channel
A = block_out_channels[i]
A = i == len(A_ ) - 1
A = get_down_block(
A_ ,num_layers=self.layers_per_block ,in_channels=A_ ,out_channels=A_ ,add_downsample=not is_final_block ,resnet_eps=1e-6 ,downsample_padding=0 ,resnet_act_fn=A_ ,resnet_groups=A_ ,attention_head_dim=A_ ,temb_channels=A_ ,)
self.down_blocks.append(A_ )
# mid
A = UNetMidBlockaD(
in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,output_scale_factor=1 ,resnet_time_scale_shift='default' ,attention_head_dim=block_out_channels[-1] ,resnet_groups=A_ ,temb_channels=A_ ,)
# out
A = nn.GroupNorm(num_channels=block_out_channels[-1] ,num_groups=A_ ,eps=1e-6 )
A = nn.SiLU()
A = 2 * out_channels if double_z else out_channels
A = nn.Convad(block_out_channels[-1] ,A_ ,3 ,padding=1 )
A = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ) -> Union[str, Any]:
A = x
A = self.conv_in(A_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(A_ : Dict ):
def custom_forward(*A_ : Tuple ):
return module(*A_ )
return custom_forward
# down
if is_torch_version('>=' ,'1.11.0' ):
for down_block in self.down_blocks:
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(A_ ) ,A_ ,use_reentrant=A_ )
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,use_reentrant=A_ )
else:
for down_block in self.down_blocks:
A = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) ,A_ )
# middle
A = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) ,A_ )
else:
# down
for down_block in self.down_blocks:
A = down_block(A_ )
# middle
A = self.mid_block(A_ )
# post-process
A = self.conv_norm_out(A_ )
A = self.conv_act(A_ )
A = self.conv_out(A_ )
return sample
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ,A_ : Optional[Any]=3 ,A_ : Optional[int]=3 ,A_ : str=("UpDecoderBlock2D",) ,A_ : Any=(64,) ,A_ : Optional[int]=2 ,A_ : Optional[int]=32 ,A_ : Tuple="silu" ,A_ : Optional[int]="group" ,) -> Any:
super().__init__()
A = layers_per_block
A = nn.Convad(
A_ ,block_out_channels[-1] ,kernel_size=3 ,stride=1 ,padding=1 ,)
A = None
A = nn.ModuleList([] )
A = in_channels if norm_type == 'spatial' else None
# mid
A = UNetMidBlockaD(
in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,output_scale_factor=1 ,resnet_time_scale_shift='default' if norm_type == 'group' else norm_type ,attention_head_dim=block_out_channels[-1] ,resnet_groups=A_ ,temb_channels=A_ ,)
# up
A = list(reversed(A_ ) )
A = reversed_block_out_channels[0]
for i, up_block_type in enumerate(A_ ):
A = output_channel
A = reversed_block_out_channels[i]
A = i == len(A_ ) - 1
A = get_up_block(
A_ ,num_layers=self.layers_per_block + 1 ,in_channels=A_ ,out_channels=A_ ,prev_output_channel=A_ ,add_upsample=not is_final_block ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,resnet_groups=A_ ,attention_head_dim=A_ ,temb_channels=A_ ,resnet_time_scale_shift=A_ ,)
self.up_blocks.append(A_ )
A = output_channel
# out
if norm_type == "spatial":
A = SpatialNorm(block_out_channels[0] ,A_ )
else:
A = nn.GroupNorm(num_channels=block_out_channels[0] ,num_groups=A_ ,eps=1e-6 )
A = nn.SiLU()
A = nn.Convad(block_out_channels[0] ,A_ ,3 ,padding=1 )
A = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Union[str, Any]=None ) -> Any:
A = z
A = self.conv_in(A_ )
A = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(A_ : List[Any] ):
def custom_forward(*A_ : Tuple ):
return module(*A_ )
return custom_forward
if is_torch_version('>=' ,'1.11.0' ):
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,A_ ,use_reentrant=A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(A_ ) ,A_ ,A_ ,use_reentrant=A_ )
else:
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) ,A_ ,A_ )
else:
# middle
A = self.mid_block(A_ ,A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = up_block(A_ ,A_ )
# post-process
if latent_embeds is None:
A = self.conv_norm_out(A_ )
else:
A = self.conv_norm_out(A_ ,A_ )
A = self.conv_act(A_ )
A = self.conv_out(A_ )
return sample
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ,A_ : Optional[int] ,A_ : Any ,A_ : str ,A_ : Dict=None ,A_ : List[Any]="random" ,A_ : Optional[int]=False ,A_ : str=True ) -> List[str]:
super().__init__()
A = n_e
A = vq_embed_dim
A = beta
A = legacy
A = nn.Embedding(self.n_e ,self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e ,1.0 / self.n_e )
A = remap
if self.remap is not None:
self.register_buffer('used' ,torch.tensor(np.load(self.remap ) ) )
A = self.used.shape[0]
A = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A = self.re_embed
A = self.re_embed + 1
print(
F'Remapping {self.n_e} indices to {self.re_embed} indices. '
F'Using {self.unknown_index} for unknown indices.' )
else:
A = n_e
A = sane_index_shape
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ) -> Any:
A = inds.shape
assert len(A_ ) > 1
A = inds.reshape(ishape[0] ,-1 )
A = self.used.to(A_ )
A = (inds[:, :, None] == used[None, None, ...]).long()
A = match.argmax(-1 )
A = match.sum(2 ) < 1
if self.unknown_index == "random":
A = torch.randint(0 ,self.re_embed ,size=new[unknown].shape ).to(device=new.device )
else:
A = self.unknown_index
return new.reshape(A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ) -> List[Any]:
A = inds.shape
assert len(A_ ) > 1
A = inds.reshape(ishape[0] ,-1 )
A = self.used.to(A_ )
if self.re_embed > self.used.shape[0]: # extra token
A = 0 # simply set to zero
A = torch.gather(used[None, :][inds.shape[0] * [0], :] ,1 ,A_ )
return back.reshape(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[Any] ) -> str:
# reshape z -> (batch, height, width, channel) and flatten
A = z.permute(0 ,2 ,3 ,1 ).contiguous()
A = z.view(-1 ,self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
A = torch.argmin(torch.cdist(A_ ,self.embedding.weight ) ,dim=1 )
A = self.embedding(A_ ).view(z.shape )
A = None
A = None
# compute loss for embedding
if not self.legacy:
A = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
A = z + (z_q - z).detach()
# reshape back to match original input shape
A = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
if self.remap is not None:
A = min_encoding_indices.reshape(z.shape[0] ,-1 ) # add batch axis
A = self.remap_to_used(A_ )
A = min_encoding_indices.reshape(-1 ,1 ) # flatten
if self.sane_index_shape:
A = min_encoding_indices.reshape(z_q.shape[0] ,z_q.shape[2] ,z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ,A_ : str ) -> Union[str, Any]:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A = indices.reshape(shape[0] ,-1 ) # add batch axis
A = self.unmap_to_all(A_ )
A = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A = self.embedding(A_ )
if shape is not None:
A = z_q.view(A_ )
# reshape back to match original input shape
A = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
return z_q
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : str ,A_ : Tuple ,A_ : Dict=False ) -> List[str]:
A = parameters
A , A = torch.chunk(A_ ,2 ,dim=1 )
A = torch.clamp(self.logvar ,-30.0 ,20.0 )
A = deterministic
A = torch.exp(0.5 * self.logvar )
A = torch.exp(self.logvar )
if self.deterministic:
A = A = torch.zeros_like(
self.mean ,device=self.parameters.device ,dtype=self.parameters.dtype )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[torch.Generator] = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
A = randn_tensor(
self.mean.shape ,generator=A_ ,device=self.parameters.device ,dtype=self.parameters.dtype )
A = self.mean + self.std * sample
return x
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple=None ) -> int:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean ,2 ) + self.var - 1.0 - self.logvar ,dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean ,2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar ,dim=[1, 2, 3] ,)
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[str] ,A_ : Union[str, Any]=[1, 2, 3] ) -> List[str]:
if self.deterministic:
return torch.Tensor([0.0] )
A = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean ,2 ) / self.var ,dim=A_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
return self.mean | 22 | 1 |
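A numpy sketch of the two core operations in `DiagonalGaussianDistribution` above: the reparameterized sample mean + std * eps, and the closed-form KL divergence to a standard normal, 0.5 * sum(mu^2 + var - 1 - logvar).

import numpy as np

rng = np.random.default_rng(0)
mean = rng.normal(size=(4,))
logvar = np.clip(rng.normal(size=(4,)), -30.0, 20.0)
std, var = np.exp(0.5 * logvar), np.exp(logvar)

sample = mean + std * rng.normal(size=(4,))          # reparameterization trick
kl = 0.5 * np.sum(mean**2 + var - 1.0 - logvar)      # KL(q || N(0, I))
print(sample, kl)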
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''BAAI/AltCLIP''': '''https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json''',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Dict = '''altclip_text_model'''
def __init__( self : Tuple ,A_ : Optional[Any]=25_0002 ,A_ : str=1024 ,A_ : int=24 ,A_ : str=16 ,A_ : Dict=4096 ,A_ : Optional[Any]="gelu" ,A_ : Optional[Any]=0.1 ,A_ : Any=0.1 ,A_ : Any=514 ,A_ : str=1 ,A_ : Union[str, Any]=0.02 ,A_ : Tuple=0.02 ,A_ : Optional[int]=1e-05 ,A_ : Optional[Any]=1 ,A_ : Tuple=0 ,A_ : List[str]=2 ,A_ : str="absolute" ,A_ : Optional[int]=True ,A_ : Union[str, Any]=768 ,**A_ : Optional[Any] ,) -> Dict:
super().__init__(pad_token_id=A_ ,bos_token_id=A_ ,eos_token_id=A_ ,**A_ )
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = hidden_act
A = intermediate_size
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = initializer_range
A = initializer_factor
A = layer_norm_eps
A = position_embedding_type
A = use_cache
A = project_dim
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[int] = '''altclip_vision_model'''
def __init__( self : Tuple ,A_ : str=768 ,A_ : List[Any]=3072 ,A_ : List[Any]=512 ,A_ : str=12 ,A_ : Optional[Any]=12 ,A_ : Any=3 ,A_ : Tuple=224 ,A_ : Tuple=32 ,A_ : int="quick_gelu" ,A_ : Optional[int]=1e-5 ,A_ : Tuple=0.0 ,A_ : Dict=0.02 ,A_ : List[str]=1.0 ,**A_ : List[str] ,) -> List[str]:
super().__init__(**A_ )
A = hidden_size
A = intermediate_size
A = projection_dim
A = num_hidden_layers
A = num_attention_heads
A = num_channels
A = patch_size
A = image_size
A = initializer_range
A = initializer_factor
A = attention_dropout
A = layer_norm_eps
A = hidden_act
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,A_ : Union[str, os.PathLike] ,**A_ : Dict ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
A , A = cls.get_config_dict(A_ ,**A_ )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get('model_type' ) == "altclip":
A = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ ,**A_ )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: int = '''altclip'''
_lowerCamelCase: Union[str, Any] = True
def __init__( self : List[str] ,A_ : Optional[Any]=None ,A_ : List[Any]=None ,A_ : Optional[int]=768 ,A_ : Optional[Any]=2.65_92 ,**A_ : str ) -> Optional[Any]:
# If `_config_dict` exist, we use them for the backward compatibility.
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
A = kwargs.pop('text_config_dict' ,A_ )
A = kwargs.pop('vision_config_dict' ,A_ )
super().__init__(**A_ )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
A = {}
# This is the complete result when using `text_config_dict`.
A = AltCLIPTextConfig(**A_ ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
A = (
F'`{key}` is found in both `text_config_dict` and `text_config` but with different values. '
F'The value `text_config_dict["{key}"]` will be used instead.'
)
# If inferred from default argument values (just to be super careful)
else:
A = (
F'`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '
F'value `text_config["{key}"]` will be overriden.'
)
logger.warning(A_ )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
A = {}
# This is the complete result when using `vision_config_dict`.
A = AltCLIPVisionConfig(**A_ ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
A = {
str(A_ ): value for key, value in _vision_config_dict['id2label'].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
A = (
F'`{key}` is found in both `vision_config_dict` and `vision_config` but with different '
F'values. The value `vision_config_dict["{key}"]` will be used instead.'
)
# If inferred from default argument values (just to be super careful)
else:
A = (
F'`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '
F'The value `vision_config["{key}"]` will be overriden.'
)
logger.warning(A_ )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
A = {}
logger.info('`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.' )
if vision_config is None:
A = {}
logger.info('`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.' )
A = AltCLIPTextConfig(**A_ )
A = AltCLIPVisionConfig(**A_ )
A = projection_dim
A = logit_scale_init_value
A = 1.0
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,A_ : AltCLIPTextConfig ,A_ : AltCLIPVisionConfig ,**A_ : List[Any] ) -> Any:
return cls(text_config=text_config.to_dict() ,vision_config=vision_config.to_dict() ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
A = copy.deepcopy(self.__dict__ )
A = self.text_config.to_dict()
A = self.vision_config.to_dict()
A = self.__class__.model_type
return output | 22 |
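A reduced sketch of the merge-with-warning pattern used in the `__init__` above: values from an explicit `*_config_dict` win over the plain `*_config`, and every silently overridden key is reported. Names here are illustrative.

def merge_with_warnings(base, override):
    merged = dict(base)
    for key, value in override.items():
        if key in base and base[key] != value:
            print(f"`{key}` is found in both configs with different values; the override wins.")
        merged[key] = value
    return merged

print(merge_with_warnings({"hidden_size": 768}, {"hidden_size": 1024}))   # {'hidden_size': 1024}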
"""simple docstring"""
def _snake_case ( snake_case__ : list , snake_case__ : list , snake_case__ : int ):
A = len(snake_case__ )
A = [[0] * n for i in range(snake_case__ )]
for i in range(snake_case__ ):
A = y_points[i]
for i in range(2 , snake_case__ ):
for j in range(snake_case__ , snake_case__ ):
A = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod() | 22 | 1 |
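A clean, self-contained sketch of the Neville recurrence the record above implements (its generated signature no longer matches the body's x_points/y_points/xa names). Interpolating three points of y = x**2 reproduces the quadratic exactly:

def neville(x_points, y_points, x0):
    n = len(x_points)
    q = [[0.0] * n for _ in range(n)]
    for i in range(n):
        q[i][0] = y_points[i]
    for i in range(1, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i])
    return q[n - 1][n - 1]

print(neville([1, 2, 3], [1, 4, 9], 2.5))   # 6.25, exact for y = x**2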
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _snake_case ( snake_case__ : List[Any] ):
A = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
A = True if 'large' in model_name or 'huge' in model_name else False
A = True if 'large' in model_name or 'huge' in model_name else False
A = True if 'large' in model_name or 'huge' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
A = [3, 3, 3, 3]
A = [5, 5, 5, 5]
elif "fl4" in model_name:
A = [4, 4, 4, 4]
A = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
A = [3, 3, 3, 3]
if "lrf" in model_name:
A = [3, 3, 3, 3]
else:
A = [2, 2, 2, 2]
if "tiny" in model_name:
A = 96
elif "small" in model_name:
A = 96
elif "base" in model_name:
A = 128
elif "large" in model_name:
A = 192
elif "xlarge" in model_name:
A = 256
elif "huge" in model_name:
A = 352
# set label information
A = 'huggingface/label-files'
if "large" in model_name or "huge" in model_name:
A = 'imagenet-22k-id2label.json'
else:
A = 'imagenet-1k-id2label.json'
A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) )
A = {int(snake_case__ ): v for k, v in idalabel.items()}
A = {v: k for k, v in idalabel.items()}
A = FocalNetConfig(
embed_dim=snake_case__ , depths=snake_case__ , focal_levels=snake_case__ , focal_windows=snake_case__ , use_conv_embed=snake_case__ , idalabel=snake_case__ , labelaid=snake_case__ , use_post_layernorm=snake_case__ , use_layerscale=snake_case__ , )
return config
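# A table-driven sketch of the embedding-dimension lookup above. The ordering
# matters: "large" is a substring of "xlarge", so the more specific keyword
# must be tested first (hypothetical helper name):
_EMBED_DIMS = [("tiny", 96), ("small", 96), ("base", 128), ("xlarge", 256), ("large", 192), ("huge", 352)]

def embed_dim_for(model_name: str) -> int:
    for keyword, dim in _EMBED_DIMS:
        if keyword in model_name:
            return dim
    raise ValueError(f"unrecognized model size in {model_name!r}")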
def _snake_case ( snake_case__ : int ):
if "patch_embed.proj" in name:
A = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
A = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
A = 'encoder.' + name
if "encoder.layers" in name:
A = name.replace('encoder.layers' , 'encoder.stages' )
if "downsample.proj" in name:
A = name.replace('downsample.proj' , 'downsample.projection' )
if "blocks" in name:
A = name.replace('blocks' , 'layers' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
A = name.replace('modulation.f' , 'modulation.projection_in' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
A = name.replace('modulation.h' , 'modulation.projection_context' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
A = name.replace('modulation.proj' , 'modulation.projection_out' )
if name == "norm.weight":
A = 'layernorm.weight'
if name == "norm.bias":
A = 'layernorm.bias'
if "head" in name:
A = name.replace('head' , 'classifier' )
else:
A = 'focalnet.' + name
return name
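# The conversion loop below applies this mapping by popping every key and
# reinserting it under its new name. A generic sketch of that pattern
# (hypothetical helper; any key-mapping function can be passed in):
def rename_state_dict(state_dict: dict, rename_key) -> dict:
    for key in list(state_dict):  # snapshot the keys: the dict mutates below
        state_dict[rename_key(key)] = state_dict.pop(key)
    return state_dict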
def _snake_case ( snake_case__ : str , snake_case__ : Any , snake_case__ : Dict=False ):
# fmt: off
A = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
# fmt: on
A = model_name_to_url[model_name]
print('Checkpoint URL: ' , snake_case__ )
A = torch.hub.load_state_dict_from_url(snake_case__ , map_location='cpu' )['model']
# rename keys
for key in state_dict.copy().keys():
A = state_dict.pop(snake_case__ )
A = val
A = get_focalnet_config(snake_case__ )
A = FocalNetForImageClassification(snake_case__ )
model.eval()
# load state dict
model.load_state_dict(snake_case__ )
# verify conversion
A = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A = BitImageProcessor(
do_resize=snake_case__ , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=snake_case__ , crop_size=224 , do_normalize=snake_case__ , image_mean=snake_case__ , image_std=snake_case__ , )
A = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
A = processor(images=snake_case__ , return_tensors='pt' )
A = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
A = image_transforms(snake_case__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , snake_case__ , atol=1e-4 )
A = model(**snake_case__ )
A = outputs.logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
print('First values of logits:' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
A = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
A = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
A = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
A = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
A = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
A = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'Saving model and processor of {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case__ )
processor.save_pretrained(snake_case__ )
if push_to_hub:
print(F'Pushing model and processor of {model_name} to the hub...' )
model.push_to_hub(F'{model_name}' )
processor.push_to_hub(F'{model_name}' )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
_lowercase = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 22 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] ,A_ : Optional[Any] ,A_ : Optional[int]=2 ,A_ : Any=True ,A_ : List[str]=False ,A_ : Tuple=10 ,A_ : List[Any]=3 ,A_ : Any=32 * 8 ,A_ : Dict=32 * 8 ,A_ : List[Any]=4 ,A_ : Tuple=64 ,) -> List[str]:
A = parent
A = batch_size
A = is_training
A = use_auxiliary_loss
A = num_queries
A = num_channels
A = min_size
A = max_size
A = num_labels
A = hidden_dim
A = hidden_dim
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
A_ )
A = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=A_ )
A = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=A_ ) > 0.5
).float()
A = (torch.rand((self.batch_size, self.num_labels) ,device=A_ ) > 0.5).long()
A = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
A = MaskaFormerConfig(
hidden_size=self.hidden_dim ,)
A = self.num_queries
A = self.num_labels
A = [1, 1, 1, 1]
A = self.num_channels
A = 64
A = 128
A = self.hidden_dim
A = self.hidden_dim
A = self.hidden_dim
return config
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
A , A , A , A , A = self.prepare_config_and_inputs()
A = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : Optional[int] ) -> Union[str, Any]:
A = output.encoder_hidden_states
A = output.pixel_decoder_hidden_states
A = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(A_ ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(A_ ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(A_ ) ,config.decoder_layers )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : List[str] ,A_ : Union[str, Any]=False ) -> str:
with torch.no_grad():
A = MaskaFormerModel(config=A_ )
model.to(A_ )
model.eval()
A = model(pixel_values=A_ ,pixel_mask=A_ )
A = model(A_ ,output_hidden_states=A_ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,)
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : Any ,A_ : Dict ,A_ : Any ,A_ : Dict ) -> Optional[Any]:
A = MaskaFormerForUniversalSegmentation(config=A_ )
model.to(A_ )
model.eval()
def comm_check_on_output(A_ : str ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
            # due to the encoder compression, masks have a 4x smaller spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
A = model(pixel_values=A_ ,pixel_mask=A_ )
A = model(A_ )
comm_check_on_output(A_ )
A = model(
pixel_values=A_ ,pixel_mask=A_ ,mask_labels=A_ ,class_labels=A_ )
comm_check_on_output(A_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
_lowerCamelCase: Optional[Any] = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
_lowerCamelCase: int = False
_lowerCamelCase: Dict = False
_lowerCamelCase: List[str] = False
_lowerCamelCase: int = False
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
A = MaskaFormerModelTester(self )
A = ConfigTester(self ,config_class=A_ ,has_text_modality=A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*A_ )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ['pixel_values']
self.assertListEqual(arg_names[:1] ,A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
A = MaskaFormerModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
A = (self.model_tester.min_size,) * 2
A = {
'pixel_values': torch.randn((2, 3, *size) ,device=A_ ),
'mask_labels': torch.randn((2, 10, *size) ,device=A_ ),
'class_labels': torch.zeros(2 ,10 ,device=A_ ).long(),
}
A = self.model_tester.get_config()
A = MaskaFormerForUniversalSegmentation(A_ ).to(A_ )
A = model(**A_ )
self.assertTrue(outputs.loss is not None )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ ).to(A_ )
A = model(**A_ ,output_attentions=A_ )
self.assertTrue(outputs.attentions is not None )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
if not self.model_tester.is_training:
return
A = self.all_model_classes[1]
A , A , A , A , A = self.model_tester.prepare_config_and_inputs()
A = model_class(A_ )
model.to(A_ )
model.train()
A = model(A_ ,mask_labels=A_ ,class_labels=A_ ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = self.all_model_classes[1]
A , A , A , A , A = self.model_tester.prepare_config_and_inputs()
A = True
A = True
A = model_class(A_ ).to(A_ )
model.train()
A = model(A_ ,mask_labels=A_ ,class_labels=A_ )
A = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
A = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
A = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
A = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=A_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_lowercase = 1e-4
def _snake_case ( ):
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
A = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(A_ )
A = self.default_image_processor
A = prepare_img()
A = image_processor(A_ ,return_tensors='pt' ).to(A_ )
A = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(A_ ,(1, 3, 384, 384) )
with torch.no_grad():
A = model(**A_ )
A = torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) )
A = torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) )
A = torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,A_ ,atol=A_ ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval()
A = self.default_image_processor
A = prepare_img()
A = image_processor(A_ ,return_tensors='pt' ).to(A_ )
A = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(A_ ,(1, 3, 384, 384) )
with torch.no_grad():
A = model(**A_ )
# masks_queries_logits
A = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
A = [
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
A = torch.tensor(A_ ).to(A_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,A_ ,atol=A_ ) )
# class_queries_logits
A = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) )
A = torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
] ).to(A_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,A_ ,atol=A_ ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval()
A = self.default_image_processor
A = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors='pt' ,)
A = inputs['pixel_values'].to(A_ )
A = [el.to(A_ ) for el in inputs['mask_labels']]
A = [el.to(A_ ) for el in inputs['class_labels']]
with torch.no_grad():
A = model(**A_ )
self.assertTrue(outputs.loss is not None ) | 22 | 1 |
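# The gradient test above hinges on torch.Tensor.retain_grad(), which keeps
# the gradient of a non-leaf tensor through backward(). A standalone sketch:
import torch

x = torch.randn(2, 3, requires_grad=True)
hidden = x * 2.0        # non-leaf tensor: its grad is normally discarded
hidden.retain_grad()
hidden.sum().backward()
assert hidden.grad is not None  # retained thanks to retain_grad()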
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[Any] ,A_ : Optional[Any] ,) -> Optional[int]:
A = parent
A = 13
A = 7
A = True
A = True
A = True
A = 99
A = 32
A = 2
A = 4
A = 37
A = 'gelu'
A = 0.1
A = 0.1
A = 512
A = 16
A = 2
A = 0.02
A = 3
A = 4
A = None
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = None
if self.use_input_mask:
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A = ids_tensor([self.batch_size] ,self.num_choices )
A = EsmConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,pad_token_id=1 ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
        A , A , A , A , A , A = self.prepare_config_and_inputs()
A = True
A = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : List[str] ,A_ : Optional[int] ,A_ : List[Any] ,A_ : Any ,A_ : Any ) -> Dict:
A = TFEsmModel(config=A_ )
A = {'input_ids': input_ids, 'attention_mask': input_mask}
A = model(A_ )
A = [input_ids, input_mask]
A = model(A_ )
A = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : int ,A_ : List[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[str] ,) -> Optional[int]:
A = True
A = TFEsmModel(config=A_ )
A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
A = model(A_ )
A = [input_ids, input_mask]
A = model(A_ ,encoder_hidden_states=A_ )
# Also check the case where encoder outputs are not passed
A = model(A_ ,attention_mask=A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[Any] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[Any] ) -> Dict:
A = TFEsmForMaskedLM(config=A_ )
A = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[Any] ,A_ : int ,A_ : Tuple ,A_ : Optional[int] ) -> Union[str, Any]:
A = self.num_labels
A = TFEsmForTokenClassification(config=A_ )
A = {'input_ids': input_ids, 'attention_mask': input_mask}
A = model(A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
A = self.prepare_config_and_inputs()
        A , A , A , A , A , A = config_and_inputs
A = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Dict = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
_lowerCamelCase: List[str] = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase: Union[str, Any] = False
_lowerCamelCase: List[Any] = False
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
A = TFEsmModelTester(self )
A = ConfigTester(self ,config_class=A_ ,hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*A_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = TFEsmModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip('Protein models do not support embedding resizing.' )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
pass
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ )
assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
A = model.get_bias()
assert isinstance(A_ ,A_ )
for k, v in name.items():
assert isinstance(A_ ,tf.Variable )
else:
A = model.get_output_embeddings()
assert x is None
A = model.get_bias()
assert name is None
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
A = tf.constant([[0, 1, 2, 3, 4, 5]] )
A = model(A_ )[0]
A = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) ,A_ )
# compare the actual values for a slice.
A = tf.constant(
[
[
[8.92_15_18, -10.58_98_14, -6.4_67_13_07],
[-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15],
[-7.78_12_47, -13.95_15_57, -3.74_05_92],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-2 ) )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
A = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
A = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
A = model(A_ )[0]
# compare the actual values for a slice.
A = tf.constant(
[
[
[0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39],
[0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22],
[0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) ) | 22 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[Any] ,A_ : Optional[Any] ,) -> Optional[int]:
A = parent
A = 13
A = 7
A = True
A = True
A = True
A = 99
A = 32
A = 2
A = 4
A = 37
A = 'gelu'
A = 0.1
A = 0.1
A = 512
A = 16
A = 2
A = 0.02
A = 3
A = 4
A = None
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = None
if self.use_input_mask:
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A = ids_tensor([self.batch_size] ,self.num_choices )
A = EsmConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,pad_token_id=1 ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
        A , A , A , A , A , A = self.prepare_config_and_inputs()
A = True
A = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : List[str] ,A_ : Optional[int] ,A_ : List[Any] ,A_ : Any ,A_ : Any ) -> Dict:
A = TFEsmModel(config=A_ )
A = {'input_ids': input_ids, 'attention_mask': input_mask}
A = model(A_ )
A = [input_ids, input_mask]
A = model(A_ )
A = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : int ,A_ : List[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[str] ,) -> Optional[int]:
A = True
A = TFEsmModel(config=A_ )
A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
A = model(A_ )
A = [input_ids, input_mask]
A = model(A_ ,encoder_hidden_states=A_ )
# Also check the case where encoder outputs are not passed
A = model(A_ ,attention_mask=A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[Any] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[Any] ) -> Dict:
A = TFEsmForMaskedLM(config=A_ )
A = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[Any] ,A_ : int ,A_ : Tuple ,A_ : Optional[int] ) -> Union[str, Any]:
A = self.num_labels
A = TFEsmForTokenClassification(config=A_ )
A = {'input_ids': input_ids, 'attention_mask': input_mask}
A = model(A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
A = self.prepare_config_and_inputs()
        A , A , A , A , A , A = config_and_inputs
A = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Dict = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
_lowerCamelCase: List[str] = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase: Union[str, Any] = False
_lowerCamelCase: List[Any] = False
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
A = TFEsmModelTester(self )
A = ConfigTester(self ,config_class=A_ ,hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*A_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = TFEsmModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip('Protein models do not support embedding resizing.' )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
pass
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ )
assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
A = model.get_bias()
assert isinstance(A_ ,A_ )
for k, v in name.items():
assert isinstance(A_ ,tf.Variable )
else:
A = model.get_output_embeddings()
assert x is None
A = model.get_bias()
assert name is None
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
A = tf.constant([[0, 1, 2, 3, 4, 5]] )
A = model(A_ )[0]
A = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) ,A_ )
# compare the actual values for a slice.
A = tf.constant(
[
[
[8.92_15_18, -10.58_98_14, -6.4_67_13_07],
[-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15],
[-7.78_12_47, -13.95_15_57, -3.74_05_92],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-2 ) )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
A = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
A = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
A = model(A_ )[0]
# compare the actual values for a slice.
A = tf.constant(
[
[
[0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39],
[0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22],
[0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) ) | 22 | 1 |
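# Both integration tests pin model outputs to hard-coded slices via allclose
# with an absolute tolerance. A small helper (hypothetical) that also reports
# how far off the values are when the check fails:
import numpy as np

def assert_close(actual, expected, atol=1e-4):
    actual = np.asarray(actual, dtype=float)
    expected = np.asarray(expected, dtype=float)
    if not np.allclose(actual, expected, atol=atol):
        raise AssertionError(f"max abs diff {np.abs(actual - expected).max():.3e} exceeds atol {atol}")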
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 22 |
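# _LazyModule defers the heavy torch imports until an attribute is first
# accessed. A minimal sketch of the same idea using PEP 562 module-level
# __getattr__ (illustrative layout, not the actual HF implementation):
import importlib

_LAZY_ATTRS = {"MegaConfig": ".configuration_mega", "MegaModel": ".modeling_mega"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")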
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
_lowercase = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
_lowercase = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode('''utf-8''').split()
_lowercase = '''|'''.join(sys.argv[1:])
_lowercase = re.compile(rF"""^({joined_dirs}).*?\.py$""")
_lowercase = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''') | 22 | 1 |
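# The same filtering logic, runnable on in-memory data: keep only .py paths
# that live under the requested top-level directories.
import re

modified = ["src/a.py", "docs/readme.md", "tests/test_x.py", "setup.py"]
dirs = ["src", "tests"]
pattern = re.compile(rf"^({'|'.join(dirs)}).*?\.py$")
print([p for p in modified if pattern.match(p)])  # ['src/a.py', 'tests/test_x.py']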
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
@staticmethod
@abstractmethod
def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Optional[int]:
raise NotImplementedError()
@abstractmethod
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
raise NotImplementedError() | 22 |
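# A sketch of how a concrete command satisfies this contract. The obfuscated
# methods above correspond, in the real CLI, to a parser-registration hook and
# a run() entry point; all names below are illustrative.
from abc import ABC, abstractmethod

class BaseCommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()

class EchoCommand(BaseCommand):
    @staticmethod
    def register_subcommand(parser):
        # `parser` is argparse's subparsers action, as in the commands that use this ABC
        sub = parser.add_parser("echo")
        sub.add_argument("text")
        sub.set_defaults(func=lambda args: EchoCommand(args.text).run())

    def __init__(self, text: str):
        self.text = text

    def run(self):
        print(self.text)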
"""simple docstring"""
import sys
from collections import defaultdict
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] ) -> int:
A = []
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : int ) -> Optional[int]:
return self.node_position[vertex]
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[Any] ,A_ : Any ) -> List[Any]:
A = pos
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : str ,A_ : Dict ,A_ : List[str] ) -> str:
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
A = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
A = 2 * start + 1
else:
A = 2 * start + 2
if heap[smallest_child] < heap[start]:
A , A = heap[smallest_child], positions[smallest_child]
A , A = (
heap[start],
positions[start],
)
A , A = temp, tempa
A = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] ,self.get_position(positions[start] ) )
self.set_position(positions[start] ,A_ )
self.top_to_bottom(A_ ,A_ ,A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Optional[int] ,A_ : Dict ,A_ : str ,A_ : Union[str, Any] ) -> Dict:
A = position[index]
while index != 0:
A = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
A = heap[parent]
A = position[parent]
self.set_position(position[parent] ,A_ )
else:
A = val
A = temp
self.set_position(A_ ,A_ )
break
A = parent
else:
A = val
A = temp
self.set_position(A_ ,0 )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple ,A_ : Dict ) -> Union[str, Any]:
A = len(A_ ) // 2 - 1
for i in range(A_ ,-1 ,-1 ):
self.top_to_bottom(A_ ,A_ ,len(A_ ) ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ,A_ : Dict ) -> Union[str, Any]:
A = positions[0]
A = sys.maxsize
self.top_to_bottom(A_ ,0 ,len(A_ ) ,A_ )
return temp
def _snake_case ( snake_case__ : Dict ):
A = Heap()
A = [0] * len(snake_case__ )
A = [-1] * len(snake_case__ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
A = [] # Heap of Distance of vertices from their neighboring vertex
A = []
for vertex in range(len(snake_case__ ) ):
distance_tv.append(sys.maxsize )
positions.append(snake_case__ )
heap.node_position.append(snake_case__ )
A = []
A = 1
A = sys.maxsize
for neighbor, distance in adjacency_list[0]:
A = 0
A = distance
heap.heapify(snake_case__ , snake_case__ )
for _ in range(1 , len(snake_case__ ) ):
A = heap.delete_minimum(snake_case__ , snake_case__ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
A = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(snake_case__ )]
):
A = distance
heap.bottom_to_top(
snake_case__ , heap.get_position(snake_case__ ) , snake_case__ , snake_case__ )
A = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
_lowercase = int(input('''Enter number of edges: ''').strip())
_lowercase = defaultdict(list)
for _ in range(edges_number):
_lowercase = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list)) | 22 | 1 |
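# The hand-rolled heap above implements Prim's minimum spanning tree. A compact
# sketch of the same algorithm using the standard-library heapq instead
# (adjacency maps each vertex to [neighbor, weight] pairs, as built above):
import heapq

def prim_mst(adjacency: dict, start: int = 0) -> list:
    visited = {start}
    edges = [(weight, start, neighbor) for neighbor, weight in adjacency[start]]
    heapq.heapify(edges)
    tree = []
    while edges:
        weight, u, v = heapq.heappop(edges)
        if v in visited:
            continue
        visited.add(v)
        tree.append((u, v))
        for neighbor, next_weight in adjacency[v]:
            if neighbor not in visited:
                heapq.heappush(edges, (next_weight, v, neighbor))
    return tree

# A triangle where the heavy (0, 2) edge is left out of the tree:
example = {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 1]], 2: [[0, 3], [1, 1]]}
assert prim_mst(example) == [(0, 1), (1, 2)]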
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
_lowercase = {
'''linear''': PIL.Image.Resampling.BILINEAR,
'''bilinear''': PIL.Image.Resampling.BILINEAR,
'''bicubic''': PIL.Image.Resampling.BICUBIC,
'''lanczos''': PIL.Image.Resampling.LANCZOS,
'''nearest''': PIL.Image.Resampling.NEAREST,
}
else:
_lowercase = {
'''linear''': PIL.Image.LINEAR,
'''bilinear''': PIL.Image.BILINEAR,
'''bicubic''': PIL.Image.BICUBIC,
'''lanczos''': PIL.Image.LANCZOS,
'''nearest''': PIL.Image.NEAREST,
}
def _snake_case ( snake_case__ : Tuple ):
A = (images / 2 + 0.5).clamp(0 , 1 )
A = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
A = numpy_to_pil(snake_case__ )
return images
def _snake_case ( snake_case__ : Any ):
if images.ndim == 3:
A = images[None, ...]
A = (images * 255).round().astype('uint8' )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
A = [Image.fromarray(image.squeeze() , mode='L' ) for image in images]
else:
A = [Image.fromarray(snake_case__ ) for image in images]
return pil_images | 22 |
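# Usage sketch for the helpers above (presumably pt_to_pil and numpy_to_pil in
# the original; the obfuscation gave both the same name, so the second
# definition shadows the first). A self-contained round trip with assumed names:
import numpy as np
import PIL.Image

def numpy_to_pil(images: np.ndarray) -> list:
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    return [PIL.Image.fromarray(image) for image in images]

batch = np.random.rand(2, 8, 8, 3)   # NHWC floats in [0, 1]
pil_images = numpy_to_pil(batch)
print(len(pil_images), pil_images[0].size)  # 2 (8, 8)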
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
_lowercase = True
except ImportError:
_lowercase = False
_lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
def _snake_case ( snake_case__ : Namespace ):
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any:
A = parser.add_parser('add-new-model' )
add_new_model_parser.add_argument('--testing' ,action='store_true' ,help='If in testing mode.' )
add_new_model_parser.add_argument('--testing_file' ,type=A_ ,help='Configuration file on which to run.' )
add_new_model_parser.add_argument(
'--path' ,type=A_ ,help='Path to cookiecutter. Should only be used for testing purposes.' )
add_new_model_parser.set_defaults(func=A_ )
def __init__( self : Tuple ,A_ : bool ,A_ : str ,A_ : Tuple=None ,*A_ : List[str] ) -> Union[str, Any]:
A = testing
A = testing_file
A = path
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
A = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
if len(A_ ) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
A = (
Path(A_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
A = path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
if not self._testing:
cookiecutter(str(A_ ) )
else:
with open(self._testing_file ,'r' ) as configuration_file:
A = json.load(A_ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=A_ ,extra_context=A_ ,)
A = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
# Retrieve configuration
with open(directory + '/configuration.json' ,'r' ) as configuration_file:
A = json.load(A_ )
A = configuration['lowercase_modelname']
A = configuration['generate_tensorflow_pytorch_and_flax']
os.remove(F'{directory}/configuration.json' )
A = 'PyTorch' in generate_tensorflow_pytorch_and_flax
A = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
A = 'Flax' in generate_tensorflow_pytorch_and_flax
A = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(A_ ,exist_ok=A_ )
os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=A_ )
# Tests require submodules as they have parent imports
with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,'w' ):
pass
shutil.move(
F'{directory}/__init__.py' ,F'{model_dir}/__init__.py' ,)
shutil.move(
F'{directory}/configuration_{lowercase_model_name}.py' ,F'{model_dir}/configuration_{lowercase_model_name}.py' ,)
def remove_copy_lines(A_ : int ):
with open(A_ ,'r' ) as f:
A = f.readlines()
with open(A_ ,'w' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(A_ )
if output_pytorch:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_{lowercase_model_name}.py' ,F'{model_dir}/modeling_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_tf_{lowercase_model_name}.py' ,F'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_flax_{lowercase_model_name}.py' ,F'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/{lowercase_model_name}.md' ,F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,)
shutil.move(
F'{directory}/tokenization_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/tokenization_fast_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,)
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(A_ : str ,A_ : str ,A_ : List[str] ):
# Create temp file
A , A = mkstemp()
A = False
with fdopen(A_ ,'w' ) as new_file:
with open(A_ ) as old_file:
for line in old_file:
new_file.write(A_ )
if line_to_copy_below in line:
A = True
for line_to_copy in lines_to_copy:
new_file.write(A_ )
if not line_found:
raise ValueError(F'Line {line_to_copy_below} was not found in file.' )
# Copy the file permissions from the old file to the new file
copymode(A_ ,A_ )
# Remove original file
remove(A_ )
# Move new file
move(A_ ,A_ )
def skip_units(A_ : Dict ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(A_ : Tuple ):
with open(A_ ) as datafile:
A = []
A = False
A = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
A = line.split('"' )[1]
A = skip_units(A_ )
elif "# Below: " in line and "##" not in line:
A = line.split('"' )[1]
A = skip_units(A_ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(A_ ,A_ ,A_ )
A = []
elif "# Replace with" in line and "##" not in line:
A = []
elif "##" not in line:
lines_to_copy.append(A_ )
remove(A_ )
replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py' )
os.rmdir(A_ ) | 22 | 1 |
"""simple docstring"""
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def _snake_case ( snake_case__ : Optional[int] ):
A = model.config
A = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
A = MBartConfig(
is_decoder=snake_case__ , is_encoder_decoder=snake_case__ , add_cross_attention=snake_case__ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=snake_case__ , add_final_layer_norm=snake_case__ , )
return encoder_config, decoder_config
def _snake_case ( snake_case__ : str ):
if "encoder.model" in name:
A = name.replace('encoder.model' , 'encoder' )
if "decoder.model" in name:
A = name.replace('decoder.model' , 'decoder' )
if "patch_embed.proj" in name:
A = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
A = name.replace('patch_embed.norm' , 'embeddings.norm' )
if name.startswith('encoder' ):
if "layers" in name:
A = 'encoder.' + name
if "attn.proj" in name:
A = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name and "mask" not in name:
A = name.replace('attn' , 'attention.self' )
if "norm1" in name:
A = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
A = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
A = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
A = name.replace('mlp.fc2' , 'output.dense' )
if name == "encoder.norm.weight":
A = 'encoder.layernorm.weight'
if name == "encoder.norm.bias":
A = 'encoder.layernorm.bias'
return name
def _snake_case ( snake_case__ : Optional[int] , snake_case__ : str ):
for key in orig_state_dict.copy().keys():
A = orig_state_dict.pop(snake_case__ )
if "qkv" in key:
A = key.split('.' )
A = int(key_split[3] )
A = int(key_split[5] )
A = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
A = val[:dim, :]
A = val[dim : dim * 2, :]
A = val[-dim:, :]
else:
A = val[:dim]
A = val[dim : dim * 2]
A = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
A = val
return orig_state_dict
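# The qkv branch above slices a fused attention projection into separate
# query/key/value tensors; concatenating them back along dim 0 reconstructs the
# fused matrix exactly. A standalone sketch (hypothetical helper name):
def split_fused_qkv(fused_weight, dim):
    query = fused_weight[:dim, :]            # first `dim` rows
    key = fused_weight[dim : dim * 2, :]     # middle `dim` rows
    value = fused_weight[-dim:, :]           # last `dim` rows
    return query, key, value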
def _snake_case ( snake_case__ : Tuple , snake_case__ : Tuple=None , snake_case__ : List[Any]=False ):
# load original model
A = DonutModel.from_pretrained(snake_case__ ).eval()
# load HuggingFace model
A , A = get_configs(snake_case__ )
A = DonutSwinModel(snake_case__ )
A = MBartForCausalLM(snake_case__ )
A = VisionEncoderDecoderModel(encoder=snake_case__ , decoder=snake_case__ )
model.eval()
A = original_model.state_dict()
A = convert_state_dict(snake_case__ , snake_case__ )
model.load_state_dict(snake_case__ )
# verify results on scanned document
A = load_dataset('hf-internal-testing/example-documents' )
A = dataset['test'][0]['image'].convert('RGB' )
A = XLMRobertaTokenizerFast.from_pretrained(snake_case__ , from_slow=snake_case__ )
A = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
A = DonutProcessor(snake_case__ , snake_case__ )
A = processor(snake_case__ , return_tensors='pt' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
A = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
A = 'When is the coffee break?'
A = task_prompt.replace('{user_input}' , snake_case__ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
A = '<s_rvlcdip>'
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
A = '<s_cord>'
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        A = '<s_cord-v2>'
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
A = '<s_zhtrainticket>'
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
A = 'hello world'
else:
raise ValueError('Model name not supported' )
A = original_model.decoder.tokenizer(snake_case__ , add_special_tokens=snake_case__ , return_tensors='pt' )[
'input_ids'
]
A = original_model.encoder.model.patch_embed(snake_case__ )
A , A = model.encoder.embeddings(snake_case__ )
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
# verify encoder hidden states
A = original_model.encoder(snake_case__ )
A = model.encoder(snake_case__ ).last_hidden_state
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-2 )
# verify decoder hidden states
A = original_model(snake_case__ , snake_case__ , snake_case__ ).logits
A = model(snake_case__ , decoder_input_ids=snake_case__ ).logits
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case__ )
processor.save_pretrained(snake_case__ )
if push_to_hub:
model.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
processor.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''naver-clova-ix/donut-base-finetuned-docvqa''',
required=False,
type=str,
help='''Name of the original model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
required=False,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub.''',
)
_lowercase = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 22 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict(self):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
[-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
'''simple docstring'''
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'clusters'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
    def test_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)
    def test_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'image_processor.json')
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_second[key], value)
    def test_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_second[key], value)
@unittest.skip('ImageGPT requires clusters at initialization' )
    def test_init_without_params(self):
pass
def prepare_images():
    dataset = load_dataset('hf-internal-testing/fixtures_image_utils', split='test')
    image1 = Image.open(dataset[4]['file'])
    image2 = Image.open(dataset[5]['file'])
    images = [image1, image2]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
'''simple docstring'''
@slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small')
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0], return_tensors='pt')
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)
        # test batched
        encoding = image_processing(images, return_tensors='pt')
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
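# A minimal sketch (for illustration) of the color-clustering step the slow
# test above exercises: ImageGPT maps every normalized RGB pixel to the id of
# its nearest cluster centroid, so an image becomes a sequence of integer
# tokens. The two-color palette below is a hypothetical stand-in for the 512
# clusters shipped with 'openai/imagegpt-small'.
def _nearest_cluster_ids():
    clusters = np.asarray([[1.0, 1.0, 1.0], [-1.0, -1.0, -1.0]])  # toy palette
    pixels = np.asarray([[0.9, 0.8, 1.0], [-0.7, -1.0, -0.9]])  # two pixels in [-1, 1]
    # squared Euclidean distance of every pixel to every centroid -> shape (2, 2)
    distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
    return distances.argmin(axis=1)  # nearest-cluster token ids: array([0, 1])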
"""simple docstring"""
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        '^': 3,
        '*': 2,
        '/': 2,
        '%': 2,
        '+': 1,
        '-': 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7
    # Print table header for output
    print(
        'Symbol'.center(8), 'Stack'.center(print_width), 'Postfix'.center(print_width), sep=' | ')
    print('-' * (print_width * 3 + 7))
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                # stop at "(", which has no entry in the priority table
                while len(stack) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8), (''.join(stack)).ljust(print_width), (''.join(post_fix)).ljust(print_width), sep=' | ')  # Output in tabular format
    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            ' '.center(8), (''.join(stack)).ljust(print_width), (''.join(post_fix)).ljust(print_width), sep=' | ')  # Output in tabular format
    return ''.join(post_fix)  # return Postfix as str
def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ')'  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = '('  # change ")" to "("
    return (infix_2_postfix(''.join(infix)))[
        ::-1
    ]  # call infix_2_postfix on the transformed string, return reverse of Postfix
if __name__ == "__main__":
    Infix = input('\nEnter an Infix Equation = ')  # Input an Infix equation
    Infix = ''.join(Infix.split())  # Remove spaces from the input
    print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')
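# Worked example: for the input a+b*(c^d-e) the reversed, bracket-swapped
# string is (e-d^c)*b+a, whose postfix form is edc^-b*a+; reversing that gives
# the prefix expression. The helper below (added for illustration) checks the
# final string; infix_2_postfix also prints its trace table when called.
def _example():
    assert infix_2_prefix('a+b*(c^d-e)') == '+a*b-^cde'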
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)
class DownloadCommand(BaseTransformersCLICommand):
'''simple docstring'''
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser('download')
        download_parser.add_argument(
            '--cache-dir', type=str, default=None, help='Path to location to store the models')
download_parser.add_argument(
'--force' ,action='store_true' ,help='Force the model to be download even if already in cache-dir' )
download_parser.add_argument(
'--trust-remote-code' ,action='store_true' ,help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' ,)
        download_parser.add_argument('model', type=str, help='Name of the model to download')
        download_parser.set_defaults(func=download_command_factory)
    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run(self):
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
            self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
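# Usage sketch (illustrative; 'bert-base-uncased' is just an example
# checkpoint id): once registered with the transformers CLI, the subcommand
# pre-downloads both the model weights and the tokenizer into the cache:
#
#   transformers-cli download bert-base-uncased --cache-dir ./models --force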
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_mobilebert''': [
'''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileBertConfig''',
'''MobileBertOnnxConfig''',
],
'''tokenization_mobilebert''': ['''MobileBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mobilebert_fast'''] = ['''MobileBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mobilebert'''] = [
'''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileBertForMaskedLM''',
'''MobileBertForMultipleChoice''',
'''MobileBertForNextSentencePrediction''',
'''MobileBertForPreTraining''',
'''MobileBertForQuestionAnswering''',
'''MobileBertForSequenceClassification''',
'''MobileBertForTokenClassification''',
'''MobileBertLayer''',
'''MobileBertModel''',
'''MobileBertPreTrainedModel''',
'''load_tf_weights_in_mobilebert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_mobilebert'''] = [
'''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileBertForMaskedLM''',
'''TFMobileBertForMultipleChoice''',
'''TFMobileBertForNextSentencePrediction''',
'''TFMobileBertForPreTraining''',
'''TFMobileBertForQuestionAnswering''',
'''TFMobileBertForSequenceClassification''',
'''TFMobileBertForTokenClassification''',
'''TFMobileBertMainLayer''',
'''TFMobileBertModel''',
'''TFMobileBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
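# A minimal sketch (simplified, not the real _LazyModule implementation) of
# why this file only builds _import_structure: attribute access on the module
# triggers the actual submodule import, keeping `import transformers` cheap.
# The class name _TinyLazyModule and its reduced behavior are assumptions for
# illustration only.
import importlib
import types
class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._name_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        # the submodule is imported lazily, on first attribute access
        submodule = self._name_to_module[attr]
        module = importlib.import_module(f'.{submodule}', self.__name__)
        return getattr(module, attr)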
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spm_char.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''',
'''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''',
'''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''microsoft/speecht5_asr''': 10_24,
'''microsoft/speecht5_tts''': 10_24,
'''microsoft/speecht5_vc''': 10_24,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', unk_token='<unk>', pad_token='<pad>', sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
@property
    def vocab_size(self):
        return self.sp_model.get_piece_size()
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)
    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
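# A short usage sketch (illustrative): 'microsoft/speecht5_tts' is one of the
# real pretrained ids listed in PRETRAINED_VOCAB_FILES_MAP above; the exact
# pieces and ids depend on the shipped SentencePiece model, so none are
# hard-coded here.
def _example_usage():
    tokenizer = SpeechT5Tokenizer.from_pretrained('microsoft/speecht5_tts')
    encoding = tokenizer('Hello world', return_tensors='pt')
    # character-level pieces plus the trailing </s> appended by
    # build_inputs_with_special_tokens above
    print(encoding.input_ids)
    print(tokenizer.decode(encoding.input_ids[0]))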