import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
    title = "{COMET}: A Neural Framework for {MT} Evaluation",
    author = "Rei, Ricardo and
      Stewart, Craig and
      Farinha, Ana C and
      Lavie, Alon",
    booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
    pages = "2685--2702",
}
"""
_DESCRIPTION = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA or MQM).
With the release of the framework, the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the model list at https://unbabel.github.io/COMET/html/models.html for more information.
"""

_KWARGS_DESCRIPTION = """
COMET score.
Args:
    `sources` (list of str): Source sentences.
    `predictions` (list of str): Candidate translations.
    `references` (list of str): Reference translations.
    `gpus` (int): Number of GPUs to run on; defaults to 1 if CUDA is available, else 0.
    `progress_bar` (bool): Whether to display a progress bar while scoring.
    The COMET model itself is selected via the metric config name (e.g. `load_metric('comet', 'wmt20-comet-da')`) and defaults to `wmt20-comet-da`.
Returns:
    `mean_score`: Mean of the segment-level scores.
    `scores`: List of segment-level scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
    >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
    >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
    >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
    >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
    >>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        mean_score, scores = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def b2mb(x):
    # Convert bytes to megabytes.
    return int(x / 2**20)
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
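# Minimal usage sketch for the tracker above (assumes a CUDA device; the
# `tracemalloc` name below is just a variable, unrelated to the stdlib module):
#
#     with TorchTracemalloc() as tracemalloc:
#         ...  # run one training epoch
#     print(f"used {tracemalloc.used} MB, peaked {tracemalloc.peaked} MB")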
def get_dataloaders(
    accelerator: Accelerator,
    batch_size: int = 16,
    model_name: str = "bert-base-cased",
    n_train: int = 320,
    n_val: int = 160,
):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels', which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length, or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(tracemalloc.peaked + b2mb(tracemalloc.begin))
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument("--n_train", type=int, default=320, help="Number of training examples to use.")
    parser.add_argument("--n_val", type=int, default=160, help="Number of validation examples to use.")
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of train epochs.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine."
def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")
def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
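# Usage sketch (hypothetical path; the command itself is interactive): running
#
#     accelerate config --config_file ~/.cache/huggingface/accelerate/default_config.yaml
#
# walks through the prompts above and writes the answers to a YAML file (or
# JSON, if the filename ends in .json) that later `accelerate launch` runs pick up.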
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp
    import numpy as np

    from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates)
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1))
        outputs_cache = model.decode(decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids)
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids)
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1))
        outputs_cache = model.decode(decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids)
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids)
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
        src_text = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
        tgt_text = [
            "California's largest electricity provider has turned off power to hundreds of thousands of customers.",
            "Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
        ]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
from typing import TYPE_CHECKING

from ...utils import _LazyModule

_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_bertweet import BertweetTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
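# Note: `_LazyModule` defers the actual submodule import until an attribute is
# first accessed, so `from transformers.models.bertweet import BertweetTokenizer`
# only pays the import cost once the tokenizer is really used.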
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1)
        )

        # image noising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        # regular denoising components
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1)

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained("fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained("fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained("fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(input_image, "anime turtle", num_inference_steps=2, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, generator: Optional[torch.Generator] = None, num_inference_steps: int = 50, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs) -> Union[ImagePipelineOutput, Tuple]:
        # Sample Gaussian noise to begin the denoising loop
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
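# Usage sketch (hypothetical checkpoint id): a pipeline like this is normally
# loaded through the custom-pipeline mechanism, e.g.
#
#     pipeline = DiffusionPipeline.from_pretrained(
#         "google/ddpm-cifar10-32", custom_pipeline="./path/to/this/file"
#     )
#     images, message = pipeline(num_inference_steps=2, return_dict=False)
#
# The extra "This is a local test" return value only marks the locally defined
# variant; a production pipeline would return the images alone.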
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list) -> float:
    """Fit an ordinary-least-squares model on [1, date, match] features and predict the next value."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])
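# The fit above solves the normal equations beta = (X^T X)^{-1} X^T y for a
# design matrix X with rows [1, date_i, match_i]. An equivalent, numerically
# safer formulation (a sketch, not what this script uses) would be:
#
#     beta = np.linalg.solve(x.T @ x, x.T @ y)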
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """Fit a SARIMAX model with the match data as an exogenous regressor and predict one step ahead."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """Fit an RBF-kernel SVR on (date, match) pairs and predict the next total_user value."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user: list) -> float:
    """Return a lower safety limit derived from the interquartile range of the data."""
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
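# Worked example (hypothetical numbers): for train_user = [10, 20, 30, 40],
# np.percentile gives q1 = 17.5 and q3 = 32.5, so iqr = 15.0 and the returned
# lower limit is 17.5 - 1.5 = 16.0.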
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """Majority vote: the forecast is 'safe' when most models agree with the actual result within 0.1."""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe += 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(data_input, columns=["total_user", "total_even", "days"])

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user) else "not "
    print(f"Today's data is {not_str}safe.")
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18)
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa)

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Convert a VQA (visual question answering) checkpoint.")
    args = parser.parse_args()

    convert_pix2struct_original_pytorch_checkpoint_to_hf(args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large, args.is_vqa)
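# Usage sketch (hypothetical paths):
#
#     python convert_pix2struct_checkpoint_to_pytorch.py \
#         --t5x_checkpoint_path /path/to/t5x/checkpoint \
#         --pytorch_dump_folder_path ./pix2struct-base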
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # Prepare a list of PIL images.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        # The two booleans below were unrecoverable from the obfuscated source; the values
        # used here follow the upstream diffusers DiT fast test.
        transformer = Transformer2DModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2,
            in_channels=4, out_channels=8, attention_bias=True, activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000, norm_type="ada_norm_zero", norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
| 286 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        # gelu_10 clips activations at 10, so the two outputs agree below the clip value
        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))
    def test_get_activation(self):
get_activation('''gelu''' )
get_activation('''gelu_10''' )
get_activation('''gelu_fast''' )
get_activation('''gelu_new''' )
get_activation('''gelu_python''' )
get_activation('''gelu_pytorch_tanh''' )
get_activation('''linear''' )
get_activation('''mish''' )
get_activation('''quick_gelu''' )
get_activation('''relu''' )
get_activation('''sigmoid''' )
get_activation('''silu''' )
get_activation('''swish''' )
get_activation('''tanh''' )
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)
    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1

        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)

        with self.assertRaises(AttributeError):
            _ = act2.a
| 92 |
'''simple docstring'''
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        # start from a white canvas of the destination size
        self.output = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Map a destination column index back to the nearest source column."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Map a destination row index back to the nearest source row."""
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
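# Nearest-neighbour resizing visits each destination pixel exactly once, so it runs
# in O(dst_w * dst_h); it preserves hard edges but looks blocky when upscaling.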
| 92 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
        from transformers import MobileNetV1ImageProcessor
class MobileNetV1ImageProcessingTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400,
        do_resize=True, size=None, do_center_crop=True, crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 518 |
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347], device=torch_device, dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 518 | 1 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False, metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli", model_args.language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli", model_args.train_language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli", model_args.language, split="validation", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli", model_args.language, split="test", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label={str(i): label for i, label in enumerate(label_list)}, label2id={label: i for i, label in enumerate(label_list)}, finetuning_task="xnli", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"], examples["hypothesis"], padding=padding, max_length=data_args.max_seq_length, truncation=True,
        )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset",
            )
# Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Prediction
if training_args.do_predict:
logger.info("*** Predict ***" )
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
main()
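# Example launch (hypothetical values, shown for illustration only):
#   python run_xnli.py --model_name_or_path bert-base-multilingual-cased \
#       --language de --train_language en --do_train --do_eval \
#       --output_dir /tmp/debug_xnli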
| 525 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None,
    head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
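# The head masks above are filled with all-ones defaults but deliberately not
# returned: the dict carries only the ids and attention masks that the Flax
# encode/decode API used below consumes.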
class FlaxBlenderbotModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
        hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32,
        eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=True,  # NOTE: the original boolean was lost in obfuscation; True matches the cache tests below
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2,
            decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48,
            eos_token_id=2, pad_token_id=1, bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2,
            decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
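    # `shift_tokens_right(input_ids, pad_token_id=1, decoder_start_token_id=2)` prepends
    # the decoder start token and drops the last position, consuming exactly one pad
    # token, which is what the `n_pad_before - 1` assertion above verifies.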
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
@unittest.skipUnless(jax_device != "cpu" , "3B test too slow on CPU." )
@slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_ids, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| 525 | 1 |
def interpolation_search(sorted_collection, item):
    """Search `item` in an ascending `sorted_collection`; return its index or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant: search `item` in `sorted_collection[left:right + 1]`."""
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right)
def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError('Collection must be ascending sorted')
    return True
return True
if __name__ == "__main__":
import sys
    debug = 0
    if debug == 1:
        collection = [10, 30, 40, 45, 50, 66, 77, 93]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit('Sequence must be ascending sorted to apply interpolation search')

        target = 67
        result = interpolation_search(collection, target)
        if result is not None:
            print(f'''{target} found at positions: {result}''')
else:
print('Not found')
| 707 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
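# The model symbols are appended below only when torch is importable; `_LazyModule`
# at the bottom of the file then defers the actual imports until first attribute access.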
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 596 | 0 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
| 526 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self, vocab_file, spm_file, bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>",
        do_upper_case=False, do_lower_case=False, tgt_lang=None, lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token,
            do_upper_case=do_upper_case, do_lower_case=do_lower_case, tgt_lang=tgt_lang,
            lang_codes=lang_codes, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}
            self.additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        # the sentencepiece processor is not picklable; it is rebuilt in __setstate__
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
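# Minimal usage sketch (hypothetical local file names, shown for illustration only):
#   tokenizer = Speech2TextTokenizer(vocab_file="vocab.json", spm_file="sentencepiece.bpe.model")
#   ids = tokenizer("hello world").input_ids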
| 526 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
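

# For context, a minimal sketch of the lazy-import machinery used above. The
# real implementation is `transformers.utils._LazyModule`; the class below is a
# simplified, hypothetical stand-in kept here purely as documentation.
class _MiniLazyModule:
    """Resolve submodule attributes on first access instead of at import time."""

    def __init__(self, name, import_structure):
        self._name = name
        # map each public attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        import importlib

        module = importlib.import_module(f".{self._attr_to_module[attr]}", self._name)
        return getattr(module, attr)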
| 443 |
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters \'macro\' to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
_CITATION = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 443 | 1 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
f' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
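

# Illustrative: the defaults above describe the base architecture, and the conv
# strides (5, 2, 2, 2, 2, 2, 2) mean one encoder frame per 320 input samples,
# which is exactly what `inputs_to_logits_ratio` reports.
if __name__ == "__main__":
    config = UniSpeechSatConfig()
    print(config.inputs_to_logits_ratio)  # 5 * 2**6 = 320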
| 516 |
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)

LOGITS_PROCESSOR_INPUTS_DOCSTRING = r'\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n'
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for processing logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for warping logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class FlaxLogitsProcessorList(list):
    """A list of logits processors/warpers that applies each of them in order to the scores."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])
        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)
        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)
        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]
        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)
        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)
        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")
        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)
        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len: int):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)
        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))
        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|>, which is handled by `without_timestamps`
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )
            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )
            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )
        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
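

# Illustrative: chaining a warper and a processor through the list container
# above. Shapes are tiny on purpose; cur_len=1 means the second token is being
# scored. Everything outside the top 2 candidates comes back as -inf.
if __name__ == "__main__":
    processors = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=2)]
    )
    demo_input_ids = jnp.zeros((1, 1), dtype=jnp.int32)
    demo_scores = jnp.array([[1.0, 2.0, 3.0, 4.0]])
    print(processors(demo_input_ids, demo_scores, cur_len=1))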
| 516 | 1 |
'''simple docstring'''
def _a ( __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : int ):
"""simple docstring"""
if principal <= 0:
raise Exception('''Principal borrowed must be > 0''' )
if rate_per_annum < 0:
raise Exception('''Rate of interest must be >= 0''' )
if years_to_repay <= 0 or not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise Exception('''Years to repay must be an integer > 0''' )
# Yearly rate is divided by 12 to get monthly rate
snake_case__ : int = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
snake_case__ : Tuple = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
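
    # Worked example: 100_000 borrowed at 10% per annum over 2 years gives a
    # monthly rate of 0.10 / 12 across 24 payments, i.e. an EMI of ~4614.49.
    print(equated_monthly_installments(100_000, 0.10, 2))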
| 710 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout_prob,
            attn_dropout=self.attn_dropout_prob,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )

    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )
@slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 502 | 0 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
),
},
"spm_file": {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
)
},
}
MAX_MODEL_INPUT_SIZES = {
"facebook/s2t-small-librispeech-asr": 1024,
}
a =["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
a ={"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the prefix tokens to the target-language code: prefix=[lang_code_id]."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) into a token (str) using the decoder."""
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by appending eos_token_id."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
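

# Illustrative usage; assumes network access to the Hub checkpoint named in the
# maps above (or a local copy of its files):
if __name__ == "__main__":
    tok = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
    ids = tok("hello world").input_ids
    print(tok.decode(ids, skip_special_tokens=True))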
| 652 |
'''simple docstring'''
def solution():
    """Returns the number of Sundays that fell on the first of the month during
    the twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
| 400 | 0 |
'''simple docstring'''
import base64


def base64_encode(string: str) -> bytes:
    """Encodes UTF-8 text to base64 bytes."""
    return base64.b64encode(string.encode("utf-8"))


def base64_decode(encoded_data: bytes) -> str:
    """Decodes base64 bytes back to a UTF-8 string."""
    return base64.b64decode(encoded_data).decode("utf-8")


test = "Hello World!"
encoded = base64_encode(test)
print(encoded)

decoded = base64_decode(encoded)
print(decoded)
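
# Round-trip sanity check: "Hello World!" encodes to b"SGVsbG8gV29ybGQh".
assert base64_encode("Hello World!") == b"SGVsbG8gV29ybGQh"
assert base64_decode(base64_encode("Hello World!")) == "Hello World!"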
| 714 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the current player (maximizer or minimizer)
    in a game tree whose leaf values are given by `scores`."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")

    if depth == height:
        return scores[node_index]

    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
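
    # Hand check for the scores above: the depth-2 MAX nodes yield
    # (90, 33, 65, 34423), the depth-1 MIN nodes (33, 65), and the root MAX
    # therefore picks 65.
    assert minimax(0, 0, True, [90, 23, 6, 33, 21, 65, 123, 34423], 3) == 65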
| 508 | 0 |
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
UpperCAmelCase_ : Union[str, Any] = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
UpperCAmelCase_ : int = "zero2"
UpperCAmelCase_ : List[Any] = "zero3"
UpperCAmelCase_ : Union[str, Any] = [ZEROa, ZEROa]
def UpperCamelCase ( _A : str , _A : Tuple , _A : str )-> str:
"""simple docstring"""
A__ = parameterized.to_safe_name("_".join(str(_lowercase ) for x in param.args ) )
return f"""{func.__name__}_{param_based_name}"""
# Cartesian-product of zero stages with models to test
UpperCAmelCase_ : Any = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)
    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(self, stage, model, eval_steps=10, distributed=True, fp16=True, quality_checks=True):
        model_name = models[model]

        output_dir = self.run_trainer(
            stage=stage, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, distributed=distributed, fp16=fp16,
        )
        self.do_checks(output_dir)
        return output_dir

    def run_trainer(self, stage, model_name, eval_steps=10, num_train_epochs=1, distributed=True, fp16=True):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
A__ = F"""\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(lowerCamelCase_ )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n """.split()
        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir
    def get_launcher(self, distributed=False):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with mode gpus because we use very little data)
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
| 491 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}
    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess( self , model_outputs):
        candidate_labels = model_outputs.pop('''candidate_labels''')
        logits = model_outputs['''logits'''][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores , list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F'Unsupported framework: {self.framework}')
        result = [
            {'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels) , key=lambda x: -x[0])
        ]
        return result
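# A hedged usage sketch for the pipeline class above, via the upstream
# `pipeline` factory from transformers. The checkpoint name and image URL are
# assumptions (any CLIP-style zero-shot checkpoint and any RGB image work),
# and running this requires network access.
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   preds = classifier(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["a photo of a cat", "a photo of a dog"],
#   )
#   print(preds)  # list of {"score": ..., "label": ...} sorted by descending score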
| 34 | 0 |
'''simple docstring'''
from __future__ import annotations
def _UpperCamelCase ( nums: list[float] ) -> bool:
    if len(nums ) < 2:
        raise ValueError('Monogons and Digons are not polygons in the Euclidean space' )
    if any(i <= 0 for i in nums ):
        raise ValueError('All values must be greater than 0' )
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
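# Worked example for the check above, which generalizes the triangle
# inequality: the longest side must be strictly shorter than the sum of the
# others. Sides [6, 10, 5] can close a polygon because 10 < 6 + 5 = 11, while
# [3, 4, 9] cannot because 9 >= 3 + 4 = 7.
#
#   assert _UpperCamelCase([6, 10, 5]) is True
#   assert _UpperCamelCase([3, 4, 9]) is False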
| 710 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name ( class_name: str ):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name )
            module = importlib.import_module(F""".{module_name}""" ,'transformers.models' )
            try:
                return getattr(module ,class_name )
            except AttributeError:
                continue
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor ,'__name__' ,None ) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('transformers' )
    if hasattr(main_module ,class_name ):
        return getattr(main_module ,class_name )
    return None
def get_feature_extractor_config ( pretrained_model_name_or_path: Union[str, os.PathLike] ,cache_dir: Optional[Union[str, os.PathLike]] = None ,force_download: bool = False ,resume_download: bool = False ,proxies: Optional[Dict[str, str]] = None ,use_auth_token: Optional[Union[bool, str]] = None ,revision: Optional[str] = None ,local_files_only: bool = False ,**kwargs ,) -> dict:
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path ,FEATURE_EXTRACTOR_NAME ,cache_dir=cache_dir ,force_download=force_download ,resume_download=resume_download ,proxies=proxies ,use_auth_token=use_auth_token ,revision=revision ,local_files_only=local_files_only ,)
    if resolved_config_file is None:
        logger.info(
            'Could not locate the feature extractor configuration file, will try to use the model config instead.' )
        return {}
    with open(resolved_config_file ,encoding='utf-8' ) as reader:
        return json.load(reader )
class snake_case :
"""simple docstring"""
def __init__( self ) -> str:
raise EnvironmentError(
'AutoFeatureExtractor is designed to be instantiated '
'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES )
    def from_pretrained( cls, pretrained_model_name_or_path, **kwargs ):
        config = kwargs.pop('config', None )
        trust_remote_code = kwargs.pop('trust_remote_code', None )
        kwargs['_from_auto'] = True
        config_dict , _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs )
        feature_extractor_class = config_dict.get('feature_extractor_type', None )
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get('auto_map', {} ):
            feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']
        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig ):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs )
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, 'feature_extractor_type', None )
            if hasattr(config, 'auto_map' ) and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map['AutoFeatureExtractor']
        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class )
        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config ) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code )
        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs )
            kwargs.pop('code_revision', None )
            if os.path.isdir(pretrained_model_name_or_path ):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs )
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs )
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config ) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config )]
            return feature_extractor_class.from_dict(config_dict, **kwargs )
        raise ValueError(
            f"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
            f"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
            f"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )
    @staticmethod
    def register( config_class, feature_extractor_class ):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class )
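    # A hedged usage sketch for the Auto class above, under its upstream name
    # `AutoFeatureExtractor` (the class itself carries a placeholder name in
    # this snippet). The checkpoint is an assumption; any repo that ships a
    # preprocessor_config.json works, and the call needs network access.
    #
    #   from transformers import AutoFeatureExtractor
    #   fe = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
    #   print(type(fe).__name__)  # e.g. Wav2Vec2FeatureExtractor, via the mapping above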
| 238 | 0 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class lowerCAmelCase_ ( TestCase ):
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname ,'dpr_tokenizer' )
        os.makedirs(dpr_tokenizer_path ,exist_ok=True )
        self.vocab_file = os.path.join(dpr_tokenizer_path ,DPR_VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        # BART tok
        vocab = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
        ]
        vocab_tokens = dict(zip(vocab ,range(len(vocab ) ) ) )
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        bart_tokenizer_path = os.path.join(self.tmpdirname ,'bart_tokenizer' )
        os.makedirs(bart_tokenizer_path ,exist_ok=True )
        self.vocab_file = os.path.join(bart_tokenizer_path ,BART_VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(bart_tokenizer_path ,BART_VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_dpr_tokenizer( self ):
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname ,'dpr_tokenizer' ) )
    def get_dpr_ctx_encoder_tokenizer( self ):
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname ,'dpr_tokenizer' ) )
    def get_bart_tokenizer( self ):
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname ,'bart_tokenizer' ) )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def get_dummy_dataset( self ):
        dataset = Dataset.from_dict(
            {
                'id': ['0', '1'],
                'text': ['foo', 'bar'],
                'title': ['Foo', 'Bar'],
                'embeddings': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
            } )
        dataset.add_faiss_index('embeddings' ,string_factory='Flat' ,metric_type=faiss.METRIC_INNER_PRODUCT )
        return dataset
    def get_dummy_canonical_hf_index_retriever( self ):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size ,question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() ,)
        with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config ,question_encoder_tokenizer=self.get_dpr_tokenizer() ,generator_tokenizer=self.get_bart_tokenizer() ,)
        return retriever
    def get_dummy_custom_hf_index_retriever( self ,from_disk ):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size ,question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() ,index_name='custom' ,)
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname ,'dataset' )
            config.index_path = os.path.join(self.tmpdirname ,'index.faiss' )
            dataset.get_index('embeddings' ).save(os.path.join(self.tmpdirname ,'index.faiss' ) )
            dataset.drop_index('embeddings' )
            dataset.save_to_disk(os.path.join(self.tmpdirname ,'dataset' ) )
            del dataset
            retriever = RagRetriever(
                config ,question_encoder_tokenizer=self.get_dpr_tokenizer() ,generator_tokenizer=self.get_bart_tokenizer() ,)
        else:
            retriever = RagRetriever(
                config ,question_encoder_tokenizer=self.get_dpr_tokenizer() ,generator_tokenizer=self.get_bart_tokenizer() ,index=CustomHFIndex(config.retrieval_vector_size ,dataset ) ,)
        return retriever
    def get_dummy_legacy_index_retriever( self ):
        dataset = Dataset.from_dict(
            {
                'id': ['0', '1'],
                'text': ['foo', 'bar'],
                'title': ['Foo', 'Bar'],
                'embeddings': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
            } )
        dataset.add_faiss_index('embeddings' ,string_factory='Flat' ,metric_type=faiss.METRIC_INNER_PRODUCT )
        index_file_name = os.path.join(self.tmpdirname ,'hf_bert_base.hnswSQ8_correct_phi_128.c_index' )
        dataset.save_faiss_index('embeddings' ,index_file_name + '.index.dpr' )
        pickle.dump(dataset['id'] ,open(index_file_name + '.index_meta.dpr' ,'wb' ) )
        passages_file_name = os.path.join(self.tmpdirname ,'psgs_w100.tsv.pkl' )
        passages = {sample['id']: [sample['text'], sample['title']] for sample in dataset}
        pickle.dump(passages ,open(passages_file_name ,'wb' ) )
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size ,question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() ,index_name='legacy' ,index_path=self.tmpdirname ,)
        retriever = RagRetriever(
            config ,question_encoder_tokenizer=self.get_dpr_tokenizer() ,generator_tokenizer=self.get_bart_tokenizer() )
        return retriever
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = 1
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_canonical_hf_index_retriever()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = retriever.retrieve(snake_case__ ,n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) ,2 )
self.assertEqual(sorted(doc_dicts[0] ) ,['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) ,snake_case__ )
self.assertEqual(doc_dicts[0]['id'][0] ,'1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] ,'0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() ,[[1], [0]] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_dataset()
retriever.save_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
SCREAMING_SNAKE_CASE_ : str = retriever.retrieve(snake_case__ ,n_docs=1 )
self.assertTrue(out is not None )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = 1
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = retriever.retrieve(snake_case__ ,n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) ,2 )
self.assertEqual(sorted(doc_dicts[0] ) ,['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) ,snake_case__ )
self.assertEqual(doc_dicts[0]['id'][0] ,'1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] ,'0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() ,[[1], [0]] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : int = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
SCREAMING_SNAKE_CASE_ : Tuple = retriever.retrieve(snake_case__ ,n_docs=1 )
self.assertTrue(out is not None )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = 1
SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = retriever.retrieve(snake_case__ ,n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) ,2 )
self.assertEqual(sorted(doc_dicts[0] ) ,['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) ,snake_case__ )
self.assertEqual(doc_dicts[0]['id'][0] ,'1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] ,'0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() ,[[1], [0]] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE_ : int = RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
SCREAMING_SNAKE_CASE_ : int = retriever.retrieve(snake_case__ ,n_docs=1 )
self.assertTrue(out is not None )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = 1
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_legacy_index_retriever()
SCREAMING_SNAKE_CASE_ : str = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = retriever.retrieve(snake_case__ ,n_docs=snake_case__ )
self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case__ ) ,2 )
self.assertEqual(sorted(doc_dicts[0] ) ,['text', 'title'] )
self.assertEqual(len(doc_dicts[0]['text'] ) ,snake_case__ )
self.assertEqual(doc_dicts[0]['text'][0] ,'bar' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['text'][0] ,'foo' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() ,[[1], [0]] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = RagRetriever.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
SCREAMING_SNAKE_CASE_ : List[str] = retriever.retrieve(snake_case__ ,n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def snake_case ( self ):
import torch
SCREAMING_SNAKE_CASE_ : List[Any] = 1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_dummy_canonical_hf_index_retriever()
SCREAMING_SNAKE_CASE_ : Optional[int] = [[5, 7], [10, 11]]
SCREAMING_SNAKE_CASE_ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = retriever(snake_case__ ,snake_case__ ,prefix=retriever.config.generator.prefix ,n_docs=snake_case__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = (
out['context_input_ids'],
out['context_attention_mask'],
out['retrieved_doc_embeds'],
)
self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(snake_case__ ,snake_case__ )
self.assertIsInstance(snake_case__ ,snake_case__ )
self.assertIsInstance(snake_case__ ,np.ndarray )
SCREAMING_SNAKE_CASE_ : List[Any] = retriever(
snake_case__ ,snake_case__ ,prefix=retriever.config.generator.prefix ,n_docs=snake_case__ ,return_tensors='pt' ,)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = ( # noqa: F841
out['context_input_ids'],
out['context_attention_mask'],
out['retrieved_doc_embeds'],
out['doc_ids'],
)
self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(snake_case__ ,torch.Tensor )
self.assertIsInstance(snake_case__ ,torch.Tensor )
self.assertIsInstance(snake_case__ ,torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_dpr_ctx_encoder_tokenizer()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 1
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case__ )
retriever.set_ctx_encoder_tokenizer(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = [[5, 7], [10, 11]]
SCREAMING_SNAKE_CASE_ : List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa )
SCREAMING_SNAKE_CASE_ : Any = retriever(snake_case__ ,snake_case__ ,prefix=retriever.config.generator.prefix ,n_docs=snake_case__ )
self.assertEqual(
len(snake_case__ ) ,6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('tokenized_doc_ids', 'tokenized_doc_attention_mask') ) ,snake_case__ ) # check for doc token related keys in dictionary.
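    # A self-contained sketch of the retrieval idea these tests exercise:
    # embedded passages are indexed and queried by maximum inner product. The
    # helper below is an illustrative stand-in for a faiss "Flat" index, not
    # part of the RagRetriever API.
    #
    #   def toy_retrieve(passage_embeds, query, n_docs=1):
    #       scores = passage_embeds @ query  # inner products, one per passage
    #       return np.argsort(-scores)[:n_docs]
    #
    #   passages = np.array([[1.0, 1.0], [2.0, 2.0]])   # "foo" and "bar" embeddings
    #   toy_retrieve(passages, np.array([1.0, 1.0]))    # -> [1], max inner product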
| 105 |
'''simple docstring'''
def is_ip_va_address_valid (ip_va_address: str ) -> bool:
    """simple docstring"""
    octets = [int(i ) for i in ip_va_address.split('''.''' ) if i.isdigit()]
    return len(octets ) == 4 and all(0 <= octet <= 255 for octet in octets )
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = 'valid' if is_ip_va_address_valid(ip) else 'invalid'
print(f"""{ip} is a {valid_or_invalid} IP v4 address.""")
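# Quick sanity checks for the validator above (a sketch; octets must be four
# dot-separated integers in range):
#
#   assert is_ip_va_address_valid("192.168.0.23") is True
#   assert is_ip_va_address_valid("192.256.15.8") is False   # octet out of range
#   assert is_ip_va_address_valid("172.100.0.8.7") is False  # too many octets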
| 460 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ = logging.get_logger(__name__)
def create_rename_keys (encoder_config , decoder_config ):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm1.weight''', F'''encoder.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm1.bias''', F'''encoder.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.weight''', F'''encoder.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.bias''', F'''encoder.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm2.weight''', F'''encoder.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm2.bias''', F'''encoder.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.weight''', F'''encoder.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.bias''', F'''encoder.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc2.weight''', F'''encoder.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.mlp.fc2.bias''', F'''encoder.encoder.layer.{i}.output.dense.bias''') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def read_in_q_k_v (state_dict , encoder_config ):
    for i in range(encoder_config.num_hidden_layers ):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(F'''encoder.deit.blocks.{i}.attn.qkv.weight''' )
        state_dict[F'''encoder.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[F'''encoder.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[F'''encoder.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key (dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img (checkpoint_url ):
    if "handwritten" in checkpoint_url:
        url = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw ).convert('''RGB''' )
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint (checkpoint_url , pytorch_dump_folder_path ):
    encoder_config = ViTConfig(image_size=3_84 , qkv_bias=False )
    decoder_config = TrOCRConfig()
    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 7_68
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 10_24
        encoder_config.intermediate_size = 40_96
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 10_24
    else:
        raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = '''relu'''
        decoder_config.max_position_embeddings = 10_24
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False
    # load HuggingFace model
    encoder = ViTModel(encoder_config , add_pooling_layer=False )
    decoder = TrOCRForCausalLM(decoder_config )
    model = VisionEncoderDecoderModel(encoder=encoder , decoder=decoder )
    model.eval()
    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' , check_hash=True )['''model''']
    rename_keys = create_rename_keys(encoder_config , decoder_config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , encoder_config )
    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]
    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith('''decoder''' ) and "output_projection" not in key:
            state_dict['''decoder.model.''' + key] = val
        else:
            state_dict['''decoder.''' + key] = val
    # load state dict
    model.load_state_dict(state_dict )
    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size )
    tokenizer = RobertaTokenizer.from_pretrained('''roberta-large''' )
    processor = TrOCRProcessor(image_processor , tokenizer )
    pixel_values = processor(images=prepare_img(checkpoint_url ) , return_tensors='''pt''' ).pixel_values
    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
    outputs = model(pixel_values=pixel_values , decoder_input_ids=decoder_input_ids )
    logits = outputs.logits
    expected_shape = torch.Size([1, 1, 5_02_65] )
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10] , expected_slice , atol=1E-3 ), "First elements of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving processor to {pytorch_dump_folder_path}''' )
    processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
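# A minimal sketch of the fused-QKV split performed by read_in_q_k_v above: the
# original checkpoint stores one (3 * hidden, hidden) projection matrix per
# layer, which maps onto three separate (hidden, hidden) query/key/value
# matrices. Shapes only; no real checkpoint is needed. `_demo_qkv_split` is an
# illustrative helper, not part of the conversion script.
def _demo_qkv_split(hidden: int = 4):
    qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
    q = qkv[:hidden, :]
    k = qkv[hidden : 2 * hidden, :]
    v = qkv[-hidden:, :]
    assert q.shape == k.shape == v.shape == (hidden, hidden)
    return q, k, v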
| 355 |
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort (a , start , end ):
    count = 0
    if start < end:
        pivot = randint(start , end )
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p , count = _in_place_partition(a , start , end )
        count += _in_place_quick_sort(a , start , p - 1 )
        count += _in_place_quick_sort(a , p + 1 , end )
    return count
def _in_place_partition (a , start , end ):
    count = 0
    pivot = randint(start , end )
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start , end ):
        count += 1
        if a[index] < a[end]: # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100 # 100 elements are to be sorted
mu, sigma = 0, 1 # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)
outfile.seek(0) # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
"""No of Comparisons for 100 elements selected from a standard normal distribution"""
"""is :"""
)
print(z)
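# Usage sketch on a small list (pivots are random, so the comparison count
# varies run to run; the sorted result does not, and the count stays around
# n * log2(n) on average):
#
#   data = [9, 1, 8, 2, 7, 3]
#   comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
#   assert data == [1, 2, 3, 7, 8, 9]
#   print(f"sorted with {comparisons} comparisons")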
| 355 | 1 |
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs ( graph , s , t , parent ):
    # Return True if there is node that has not iterated.
    visited = [False] * len(graph )
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut ( graph , source , sink ):
    parent = [-1] * (len(graph ))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph] # Record original cut, copy.
    while bfs(graph , source , sink , parent ):
        path_flow = float("""Inf""" )
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph ) ):
        for j in range(len(graph[0] ) ):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j) )
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
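# A hedged companion sketch: the same augmenting-path loop, returning the
# max-flow value instead of the cut edges (Edmonds-Karp, since bfs augments
# along shortest paths). `maxflow` is an illustrative helper, not part of the
# original module; it copies the graph because the loop mutates capacities.
def maxflow(graph, source, sink):
    graph = [row[:] for row in graph]  # work on a copy, keep the input intact
    parent = [-1] * len(graph)
    flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return flow

# Note: mincut above mutates test_graph in place, so compute the flow value on
# a rebuilt capacity matrix (or call maxflow before mincut). By max-flow/min-cut
# duality it equals the total capacity crossing the printed cut.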
| 6 |
from torch import nn
def get_activation ( act_fn: str ):
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f'''Unsupported activation function: {act_fn}''' )
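# Usage sketch for the factory above (`get_activation` maps a config string to
# an nn.Module instance):
#
#   import torch
#   act = get_activation("silu")
#   act(torch.tensor([-1.0, 0.0, 1.0]))  # SiLU(x) = x * sigmoid(x)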
| 6 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester :
    def __init__( self , parent , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = '''last'''
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.float32 )
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            ) # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size] , 2 , dtype=tf.float32 )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = FlaubertConfig(
            vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def create_and_check_flaubert_model( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask ):
        '''simple docstring'''
        model = TFFlaubertModel(config=config )
        inputs = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_flaubert_lm_head( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask ):
        '''simple docstring'''
        model = TFFlaubertWithLMHeadModel(config )
        inputs = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_flaubert_qa( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask ):
        '''simple docstring'''
        model = TFFlaubertForQuestionAnsweringSimple(config )
        inputs = {'''input_ids''': input_ids, '''lengths''': input_lengths}
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_flaubert_sequence_classif( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask ):
        '''simple docstring'''
        model = TFFlaubertForSequenceClassification(config )
        inputs = {'''input_ids''': input_ids, '''lengths''': input_lengths}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def create_and_check_flaubert_for_token_classification( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_flaubert_for_multiple_choice( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''langs''': token_type_ids,
            '''lengths''': input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
    all_generative_model_classes = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
"""feature-extraction""": TFFlaubertModel,
"""fill-mask""": TFFlaubertWithLMHeadModel,
"""question-answering""": TFFlaubertForQuestionAnsweringSimple,
"""text-classification""": TFFlaubertForSequenceClassification,
"""token-classification""": TFFlaubertForTokenClassification,
"""zero-shot""": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        '''simple docstring'''
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('''Fast''' )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TFFlaubertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FlaubertConfig , emb_dim=37 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_flaubert_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs )
    def test_flaubert_lm_head( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs )
    def test_flaubert_qa( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs )
    def test_flaubert_sequence_classif( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs )
    def test_for_token_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs )
    def test_for_multiple_choice( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest( unittest.TestCase):
    @slow
    def test_output_embeds_base_model( self ):
        '''simple docstring'''
        model = TFFlaubertModel.from_pretrained('''jplu/tf-flaubert-small-cased''' )
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.int32 , ) # "J'aime flaubert !"
        output = model(input_ids )[0]
        expected_shape = tf.TensorShape((1, 8, 512) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.876_8773, -1.56_6555, 0.2707_2418],
                    [-1.692_0038, -0.587_3505, 1.932_9599],
                    [-2.956_3985, -1.699_3835, 1.797_2052],
                ]
            ] , dtype=tf.float32 , )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 34 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset( Dataset):
    def __init__( self , dataset , process , params ):
        '''simple docstring'''
        self.dataset = dataset
        self.process = process
        self.params = params
    def __len__( self ):
        '''simple docstring'''
        return len(self.dataset )
    def __getitem__( self , i ):
        '''simple docstring'''
        item = self.dataset[i]
        processed = self.process(item , **self.params )
        return processed
class PipelineIterator( IterableDataset):
    def __init__( self , loader , infer , params , loader_batch_size=None ):
        '''simple docstring'''
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size
        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None
    def __len__( self ):
        '''simple docstring'''
        return len(self.loader )
    def __iter__( self ):
        '''simple docstring'''
        self.iterator = iter(self.loader )
        return self
    def loader_batch_item( self ):
        '''simple docstring'''
        if isinstance(self._loader_batch_data , torch.Tensor ):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element , ModelOutput ):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0] , torch.Tensor ):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element , tuple ):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0] , torch.Tensor ):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0 )
                elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index] , 0 )
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched )
        self._loader_batch_index += 1
        return result
    def __next__( self ):
        '''simple docstring'''
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        item = next(self.iterator )
        processed = self.infer(item , **self.params )
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed , torch.Tensor ):
                first_tensor = processed
            else:
                key = list(processed.keys() )[0]
                first_tensor = processed[key]
            if isinstance(first_tensor , list ):
                observed_batch_size = len(first_tensor )
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # `subiterator` being None means we haven't started a `preprocess` iterator yet, so start it
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of iterator
            # have created their subiterator and have been iterated against.
            #
            # Another way to look at it is that we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism, but
        # with an extra required key, `is_last`: because everything is flattened
        # by `PipelineChunkIterator`, we need to keep track of the original
        # `process` boundaries here so that `process` and `postprocess` see the
        # same data. This iterator accumulates items (possibly while unbatching)
        # until it hits `is_last`, then passes the accumulator on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # This could be the last batch, so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared second norm of a vector: norm_squared(v) = sum(x * x for x in v)."""
    return np.dot(vector, vector)


class SVC:
    """Support Vector Classifier with linear and RBF kernels."""

    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            raise ValueError(f"Unknown kernel: {kernel}")

    # kernels
    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        """Linear kernel (as if no kernel were used at all)."""
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        """RBF: Radial Basis Function kernel."""
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            """Opposite of the dual objective, so that `minimize` maximizes it."""
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_constraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_constraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(observations[i], observations[j])
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        """Classify `observation`: returns 1 or -1."""
        s = sum(
            self.optimum[n] * self.classes[n] * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
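

# Illustrative usage sketch (toy data made up for the example): train a linear
# SVC on four 2-D points separable along the first coordinate.
#
#   xs = [np.asarray([0.0, 1.0]), np.asarray([0.0, 2.0]),
#         np.asarray([1.0, 1.0]), np.asarray([1.0, 2.0])]
#   ys = np.asarray([1, 1, -1, -1])
#   svc = SVC(kernel="linear")
#   svc.fit(xs, ys)
#   svc.predict(np.asarray([0.0, 1.5]))   # expected: 1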
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # This __init__ exists for legacy code: it converts the deprecated
        # `no_*` arguments into their positive counterparts.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
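

# Illustrative usage sketch (the model name is an example choice): benchmark a
# tiny model for inference speed and memory.
#
#   from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
#
#   args = PyTorchBenchmarkArguments(
#       models=["sshleifer/tiny-gpt2"], batch_sizes=[1], sequence_lengths=[8]
#   )
#   results = PyTorchBenchmark(args).run()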
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
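

# Illustrative usage sketch: instantiate a GIT configuration with defaults and
# inspect the nested vision configuration.
#
#   configuration = GitConfig()               # GIT-base style defaults
#   configuration.vision_config.patch_size    # -> 16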
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter class under a format type name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register an error to raise when the format type of an uninstalled backend is requested."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Factory that returns a Formatter instance given a format type name (or alias) and keyword arguments."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None)}, but got '{format_type}'"
        )
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)


class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
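

# Illustrative usage sketch: combine explicit backbone/decoder configurations.
#
#   from transformers import SwinConfig, DetrConfig
#
#   config = MaskFormerConfig.from_backbone_and_decoder_configs(
#       backbone_config=SwinConfig(), decoder_config=DetrConfig()
#   )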
'''simple docstring'''
def is_arithmetic_series(series: list) -> bool:
    """
    Check whether the input series is an arithmetic series, i.e. whether the
    difference between consecutive terms is constant.
    """
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean (average) of the series."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
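

# Worked examples (illustrative):
#
#   is_arithmetic_series([2, 4, 6])   # True  (common difference 2)
#   is_arithmetic_series([2, 4, 7])   # False (4 - 2 != 7 - 4)
#   arithmetic_mean([2, 4, 6])        # 4.0   ((2 + 4 + 6) / 3)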
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
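

# Illustrative usage sketch: build a ViT variant for larger inputs.
#
#   configuration = ViTConfig(image_size=384, patch_size=32)
#   # other defaults still apply: hidden_size=768, num_hidden_layers=12, ...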
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # linear multistep coefficients (Adams-Bashforth style), see Algorithm 2 of the paper
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
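

# Illustrative usage sketch of the scheduler API (the denoising model itself is
# assumed and not shown):
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       model_output = model(sample, t)   # hypothetical model call
#       sample = scheduler.step(model_output, t, sample).prev_sample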
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class Conversation:
    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> Dict:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
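

# Illustrative usage sketch (the model name is an example choice):
#
#   from transformers import pipeline
#
#   chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
#   conversation = Conversation("Going to the movies tonight - any suggestions?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])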
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            fp16=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a : List[str] = logging.get_logger(__name__)
__a : Union[str, Any] = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UpperCAmelCase( snake_case_ ):
"""simple docstring"""
a : Union[str, Any] = """unispeech"""
def __init__( self , lowerCamelCase=32 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=0.02 , lowerCamelCase=1E-5 , lowerCamelCase="group" , lowerCamelCase="gelu" , lowerCamelCase=(512, 512, 512, 512, 512, 512, 512) , lowerCamelCase=(5, 2, 2, 2, 2, 2, 2) , lowerCamelCase=(10, 3, 3, 3, 3, 2, 2) , lowerCamelCase=False , lowerCamelCase=128 , lowerCamelCase=16 , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=0.05 , lowerCamelCase=10 , lowerCamelCase=2 , lowerCamelCase=0.0 , lowerCamelCase=10 , lowerCamelCase=0 , lowerCamelCase=320 , lowerCamelCase=2 , lowerCamelCase=0.1 , lowerCamelCase=100 , lowerCamelCase=256 , lowerCamelCase=256 , lowerCamelCase=0.1 , lowerCamelCase="mean" , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=256 , lowerCamelCase=80 , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=0.5 , **lowerCamelCase , ) -> int:
"""simple docstring"""
super().__init__(**lowerCamelCase , pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase )
lowercase__ : int = hidden_size
lowercase__ : Union[str, Any] = feat_extract_norm
lowercase__ : Dict = feat_extract_activation
lowercase__ : List[str] = list(lowerCamelCase )
lowercase__ : int = list(lowerCamelCase )
lowercase__ : int = list(lowerCamelCase )
lowercase__ : Union[str, Any] = conv_bias
lowercase__ : Dict = num_conv_pos_embeddings
lowercase__ : Optional[int] = num_conv_pos_embedding_groups
lowercase__ : Any = len(self.conv_dim )
lowercase__ : Optional[int] = num_hidden_layers
lowercase__ : Union[str, Any] = intermediate_size
lowercase__ : Tuple = hidden_act
lowercase__ : List[str] = num_attention_heads
lowercase__ : Any = hidden_dropout
lowercase__ : List[str] = attention_dropout
lowercase__ : int = activation_dropout
lowercase__ : Tuple = feat_proj_dropout
lowercase__ : Optional[int] = final_dropout
lowercase__ : Dict = layerdrop
lowercase__ : Optional[int] = layer_norm_eps
lowercase__ : int = initializer_range
lowercase__ : str = num_ctc_classes
lowercase__ : Optional[Any] = vocab_size
lowercase__ : List[Any] = do_stable_layer_norm
lowercase__ : Optional[int] = use_weighted_layer_sum
lowercase__ : Union[str, Any] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase__ : List[str] = apply_spec_augment
lowercase__ : List[Any] = mask_time_prob
lowercase__ : Optional[Any] = mask_time_length
lowercase__ : List[str] = mask_time_min_masks
lowercase__ : List[str] = mask_feature_prob
lowercase__ : Optional[int] = mask_feature_length
lowercase__ : Optional[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowercase__ : Optional[Any] = num_codevectors_per_group
lowercase__ : Tuple = num_codevector_groups
lowercase__ : Union[str, Any] = contrastive_logits_temperature
lowercase__ : List[str] = feat_quantizer_dropout
lowercase__ : Union[str, Any] = num_negatives
lowercase__ : int = codevector_dim
lowercase__ : Optional[Any] = proj_codevector_dim
lowercase__ : int = diversity_loss_weight
# ctc loss
lowercase__ : str = ctc_loss_reduction
lowercase__ : Optional[int] = ctc_zero_infinity
# pretraining loss
lowercase__ : Union[str, Any] = replace_prob
@property
def __a ( self ) -> List[str]:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
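# Quick numeric check of the property above (standalone restatement, not part of
# the original file): the feature extractor's overall hop size is the product of
# the conv strides, so the default (5, 2, 2, 2, 2, 2, 2) maps 320 input samples
# to one output frame.
assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320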
| 298 |
from __future__ import annotations
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> list[int]:
lowercase__ : List[str] = [True] * limit
lowercase__ : Union[str, Any] = False
lowercase__ : List[str] = False
lowercase__ : List[str] = True
for i in range(3 ,int(limit**0.5 + 1 ) ,2 ):
lowercase__ : Dict = i * 2
while index < limit:
lowercase__ : Union[str, Any] = False
lowercase__ : str = index + i
lowercase__ : Union[str, Any] = [2]
for i in range(3 ,SCREAMING_SNAKE_CASE_ ,2 ):
if is_prime[i]:
primes.append(SCREAMING_SNAKE_CASE_ )
return primes
def snake_case_ ( SCREAMING_SNAKE_CASE_ = 1_00_00_00 ) -> int:
lowercase__ : Any = prime_sieve(SCREAMING_SNAKE_CASE_ )
lowercase__ : List[str] = 0
lowercase__ : Dict = 0
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
for j in range(i + length ,len(SCREAMING_SNAKE_CASE_ ) ):
lowercase__ : Optional[Any] = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
lowercase__ : Dict = j - i
lowercase__ : Any = sol
return largest
if __name__ == "__main__":
print(f'{solution() = }')
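# Hedged de-obfuscated reading of the sieve above (the helper name below is my
# restoration; the original identifiers were lost in the renaming pass):
def prime_sieve_demo(limit: int) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = is_prime[1] = False
    for i in range(2, int(limit**0.5) + 1):
        if is_prime[i]:
            for multiple in range(i * i, limit, i):
                is_prime[multiple] = False
    return [n for n in range(2, limit) if is_prime[n]]
assert prime_sieve_demo(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]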
| 298 | 1 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _A ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: Tuple = old_name
if "patch_embed" in old_name:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__: Dict = old_name.split("." )
if layer == "0":
UpperCAmelCase__: Optional[int] = old_name.replace("0" ,"convolution1" )
elif layer == "1":
UpperCAmelCase__: Dict = old_name.replace("1" ,"batchnorm_before" )
elif layer == "3":
UpperCAmelCase__: int = old_name.replace("3" ,"convolution2" )
else:
UpperCAmelCase__: List[Any] = old_name.replace("4" ,"batchnorm_after" )
if "network" in old_name and re.search(R"\d\.\d" ,SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: Optional[int] = R"\b\d{2}\b"
if bool(re.search(SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ) ):
UpperCAmelCase__: Union[str, Any] = re.search(R"\d\.\d\d." ,SCREAMING_SNAKE_CASE ).group()
else:
UpperCAmelCase__: Tuple = re.search(R"\d\.\d." ,SCREAMING_SNAKE_CASE ).group()
if int(match[0] ) < 6:
UpperCAmelCase__: Tuple = old_name.replace(SCREAMING_SNAKE_CASE ,"" )
UpperCAmelCase__: Any = trimmed_name.replace("network" ,match[0] + ".meta4D_layers.blocks." + match[2:-1] )
UpperCAmelCase__: Optional[Any] = "intermediate_stages." + trimmed_name
else:
UpperCAmelCase__: Optional[int] = old_name.replace(SCREAMING_SNAKE_CASE ,"" )
if int(match[2] ) < num_meta4D_last_stage:
UpperCAmelCase__: Union[str, Any] = trimmed_name.replace("network" ,"meta4D_layers.blocks." + match[2] )
else:
UpperCAmelCase__: Optional[int] = str(int(match[2] ) - num_meta4D_last_stage )
UpperCAmelCase__: Dict = trimmed_name.replace("network" ,"meta3D_layers.blocks." + layer_index )
if "norm1" in old_name:
UpperCAmelCase__: List[str] = trimmed_name.replace("norm1" ,"layernorm1" )
elif "norm2" in old_name:
UpperCAmelCase__: Dict = trimmed_name.replace("norm2" ,"layernorm2" )
elif "fc1" in old_name:
UpperCAmelCase__: Optional[Any] = trimmed_name.replace("fc1" ,"linear_in" )
elif "fc2" in old_name:
UpperCAmelCase__: List[Any] = trimmed_name.replace("fc2" ,"linear_out" )
UpperCAmelCase__: List[Any] = "last_stage." + trimmed_name
elif "network" in old_name and re.search(R".\d." ,SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: Optional[Any] = old_name.replace("network" ,"intermediate_stages" )
if "fc" in new_name:
UpperCAmelCase__: Dict = new_name.replace("fc" ,"convolution" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
UpperCAmelCase__: List[str] = new_name.replace("norm1" ,"batchnorm_before" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
UpperCAmelCase__: Any = new_name.replace("norm2" ,"batchnorm_after" )
if "proj" in new_name:
UpperCAmelCase__: Dict = new_name.replace("proj" ,"projection" )
if "dist_head" in new_name:
UpperCAmelCase__: int = new_name.replace("dist_head" ,"distillation_classifier" )
elif "head" in new_name:
UpperCAmelCase__: Optional[int] = new_name.replace("head" ,"classifier" )
elif "patch_embed" in new_name:
UpperCAmelCase__: str = "efficientformer." + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
UpperCAmelCase__: Optional[Any] = new_name.replace("norm" ,"layernorm" )
UpperCAmelCase__: List[Any] = "efficientformer." + new_name
else:
UpperCAmelCase__: List[str] = "efficientformer.encoder." + new_name
return new_name
def _A ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ):
for key in checkpoint.copy().keys():
UpperCAmelCase__: Union[str, Any] = checkpoint.pop(SCREAMING_SNAKE_CASE )
UpperCAmelCase__: Union[str, Any] = val
return checkpoint
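# Minimal sketch of the key-renaming pass above (the helper and lambda below are
# illustrative stand-ins, not the obfuscated `_A` functions from this file):
def rename_keys_demo(state_dict, rename_fn):
    for key in list(state_dict):  # copy the keys so the dict can be mutated in place
        state_dict[rename_fn(key)] = state_dict.pop(key)
    return state_dict
assert rename_keys_demo({"patch_embed.0.weight": 0}, lambda k: k.replace(".0.", ".convolution1.")) == {
    "patch_embed.convolution1.weight": 0
}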
def _A ( ):
UpperCAmelCase__: Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase__: str = Image.open(requests.get(SCREAMING_SNAKE_CASE ,stream=SCREAMING_SNAKE_CASE ).raw )
return image
def _A ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: Dict = torch.load(SCREAMING_SNAKE_CASE ,map_location="cpu" )["model"]
UpperCAmelCase__: Tuple = EfficientFormerConfig.from_json_file(SCREAMING_SNAKE_CASE )
UpperCAmelCase__: Union[str, Any] = EfficientFormerForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE )
UpperCAmelCase__: Dict = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
UpperCAmelCase__: Union[str, Any] = config.depths[-1] - config.num_metaad_blocks + 1
UpperCAmelCase__: int = convert_torch_checkpoint(SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase__: str = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
# prepare image
UpperCAmelCase__: int = prepare_img()
UpperCAmelCase__: Optional[Any] = 2_5_6
UpperCAmelCase__: Dict = 2_2_4
UpperCAmelCase__: Optional[Any] = EfficientFormerImageProcessor(
size={"shortest_edge": image_size} ,crop_size={"height": crop_size, "width": crop_size} ,resample=pillow_resamplings["bicubic"] ,)
UpperCAmelCase__: Optional[Any] = processor(images=SCREAMING_SNAKE_CASE ,return_tensors="pt" ).pixel_values
# original processing pipeline
UpperCAmelCase__: int = Compose(
[
Resize(SCREAMING_SNAKE_CASE ,interpolation=pillow_resamplings["bicubic"] ),
CenterCrop(SCREAMING_SNAKE_CASE ),
ToTensor(),
Normalize(SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ),
] )
UpperCAmelCase__: List[str] = image_transforms(SCREAMING_SNAKE_CASE ).unsqueeze(0 )
assert torch.allclose(SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE )
UpperCAmelCase__: Any = model(SCREAMING_SNAKE_CASE )
UpperCAmelCase__: Union[str, Any] = outputs.logits
UpperCAmelCase__: int = (1, 1_0_0_0)
if "l1" in model_name:
UpperCAmelCase__: Optional[int] = torch.Tensor(
[-0.13_12, 0.43_53, -1.04_99, -0.51_24, 0.41_83, -0.67_93, -1.37_77, -0.08_93, -0.73_58, -2.43_28] )
assert torch.allclose(logits[0, :1_0] ,SCREAMING_SNAKE_CASE ,atol=1e-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
UpperCAmelCase__: List[Any] = torch.Tensor(
[-1.31_50, -1.54_56, -1.25_56, -0.84_96, -0.71_27, -0.78_97, -0.97_28, -0.30_52, 0.37_51, -0.31_27] )
assert torch.allclose(logits[0, :1_0] ,SCREAMING_SNAKE_CASE ,atol=1e-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
UpperCAmelCase__: Any = torch.Tensor(
[-1.02_83, -1.41_31, -0.56_44, -1.31_15, -0.57_85, -1.20_49, -0.75_28, 0.19_92, -0.38_22, -0.08_78] )
assert logits.shape == expected_shape
else:
raise ValueError(
f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7" )
# Save Checkpoints
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}" )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
print(f"Processor successfuly saved at {pytorch_dump_path}" )
if push_to_hub:
print("Pushing model to the hub..." )
model.push_to_hub(
repo_id=f"Bearnardd/{pytorch_dump_path}" ,commit_message="Add model" ,use_temp_dir=SCREAMING_SNAKE_CASE ,)
processor.push_to_hub(
repo_id=f"Bearnardd/{pytorch_dump_path}" ,commit_message="Add image processor" ,use_temp_dir=SCREAMING_SNAKE_CASE ,)
if __name__ == "__main__":
_lowerCAmelCase : Tuple =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""",
default=None,
type=str,
required=True,
help="""Path to EfficientFormer pytorch checkpoint.""",
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for EfficientFormer model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
parser.set_defaults(push_to_hub=True)
_lowerCAmelCase : Tuple =parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 113 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
_lowerCAmelCase : int ="""scheduler_config.json"""
class __UpperCamelCase ( _a ):
'''simple docstring'''
__magic_name__ = 1
__magic_name__ = 2
__magic_name__ = 3
__magic_name__ = 4
__magic_name__ = 5
__magic_name__ = 6
__magic_name__ = 7
__magic_name__ = 8
__magic_name__ = 9
__magic_name__ = 1_0
__magic_name__ = 1_1
__magic_name__ = 1_2
__magic_name__ = 1_3
__magic_name__ = 1_4
@dataclass
class __UpperCamelCase ( _a ):
'''simple docstring'''
__magic_name__ = 42
class __UpperCamelCase :
'''simple docstring'''
__magic_name__ = SCHEDULER_CONFIG_NAME
__magic_name__ = []
__magic_name__ = True
@classmethod
def _UpperCAmelCase ( cls , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__=False , **lowerCamelCase__ , ):
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__: Tuple = cls.load_config(
pretrained_model_name_or_path=lowerCamelCase__ , subfolder=lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ , return_commit_hash=lowerCamelCase__ , **lowerCamelCase__ , )
return cls.from_config(lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ , **lowerCamelCase__ )
def _UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ = False , **lowerCamelCase__ ):
self.save_config(save_directory=lowerCamelCase__ , push_to_hub=lowerCamelCase__ , **lowerCamelCase__ )
@property
def _UpperCAmelCase ( self ):
return self._get_compatibles()
@classmethod
def _UpperCAmelCase ( cls ):
UpperCAmelCase__: List[str] = list(set([cls.__name__] + cls._compatibles ) )
UpperCAmelCase__: Union[str, Any] = importlib.import_module(__name__.split("." )[0] )
UpperCAmelCase__: int = [
getattr(lowerCamelCase__ , lowerCamelCase__ ) for c in compatible_classes_str if hasattr(lowerCamelCase__ , lowerCamelCase__ )
]
return compatible_classes
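# Standalone restatement of the compatibles lookup above: resolve class names
# against a module, skipping names the module does not define (the module and
# names below are illustrative, not the diffusers internals):
_demo_module = importlib.import_module("collections")
_demo_resolved = [getattr(_demo_module, n) for n in ("OrderedDict", "NotAClass") if hasattr(_demo_module, n)]
assert [c.__name__ for c in _demo_resolved] == ["OrderedDict"]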
| 113 | 1 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __snake_case( unittest.TestCase ):
'''simple docstring'''
def _a ( self ):
'''simple docstring'''
__A : Any = inspect.getfile(accelerate.test_utils )
__A : Any = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
__A : Union[str, Any] = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
__A : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
@require_multi_gpu
def _a ( self ):
'''simple docstring'''
print(F'Found {torch.cuda.device_count()} devices.' )
__A : int = ['''torchrun''', F'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__lowerCamelCase , env=os.environ.copy() )
@require_multi_gpu
def _a ( self ):
'''simple docstring'''
print(F'Found {torch.cuda.device_count()} devices.' )
__A : List[str] = ['''torchrun''', F'--nproc_per_node={torch.cuda.device_count()}', self.operation_file_path]
print(F'Command: {cmd}' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__lowerCamelCase , env=os.environ.copy() )
@require_multi_gpu
def _a ( self ):
'''simple docstring'''
__A : Optional[int] = ['''torchrun''', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__lowerCamelCase , env=os.environ.copy() )
@require_multi_gpu
def _a ( self ):
'''simple docstring'''
print(F'Found {torch.cuda.device_count()} devices, using 2 devices only' )
__A : List[str] = ['''torchrun''', F'--nproc_per_node={torch.cuda.device_count()}', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='0,1' ):
execute_subprocess_async(__lowerCamelCase , env=os.environ.copy() )
if __name__ == "__main__":
lowerCamelCase : Optional[Any] =Accelerator()
lowerCamelCase : Union[str, Any] =(accelerator.state.process_index + 2, 10)
lowerCamelCase : int =torch.randint(0, 10, shape).to(accelerator.device)
lowerCamelCase : Optional[Any] =''''''
lowerCamelCase : Any =accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
lowerCamelCase : List[Any] =accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
lowerCamelCase : Dict =accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
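# Hedged single-process sketch of `pad_across_processes` as exercised above:
# each rank zero-pads its tensor up to the largest first dimension found across
# ranks (the "other rank" size below is hard-coded instead of gathered):
def pad_demo(local, max_rows, pad_first=False):
    padded = torch.zeros(max_rows, *local.shape[1:], dtype=local.dtype)
    if pad_first:
        padded[max_rows - local.shape[0] :] = local
    else:
        padded[: local.shape[0]] = local
    return padded
_t = torch.ones(3, 4)
assert torch.equal(pad_demo(_t, 5)[:3], _t) and torch.all(pad_demo(_t, 5)[3:] == 0)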
| 708 |
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
lowerCamelCase : Optional[Any] ='''src/transformers'''
lowerCamelCase : Optional[int] ='''docs/source/en'''
lowerCamelCase : Dict ='''.'''
def _lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str ) -> List[Any]:
'''simple docstring'''
with open(_SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
__A : Optional[int] = f.readlines()
# Find the start prompt.
__A : List[str] = 0
while not lines[start_index].startswith(_SCREAMING_SNAKE_CASE ):
start_index += 1
start_index += 1
__A : Optional[int] = start_index
while not lines[end_index].startswith(_SCREAMING_SNAKE_CASE ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowerCamelCase : List[str] ='''Model|Encoder|Decoder|ForConditionalGeneration'''
# Regexes that match TF/Flax/PT model names.
lowerCamelCase : Optional[int] =re.compile(r'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
lowerCamelCase : Optional[int] =re.compile(r'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
lowerCamelCase : Optional[int] =re.compile(r'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase : Optional[Any] =direct_transformers_import(TRANSFORMERS_PATH)
def _lowercase ( _SCREAMING_SNAKE_CASE : Dict ) -> List[Any]:
'''simple docstring'''
__A : int = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , _SCREAMING_SNAKE_CASE )
return [m.group(0 ) for m in matches]
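# Quick check of the camel-case splitter above on a representative model name
# (same regex, restated inline):
assert [
    m.group(0)
    for m in re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", "TFBertForMaskedLM")
] == ["TF", "Bert", "For", "Masked", "LM"]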
def _lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] ) -> Any:
'''simple docstring'''
__A : Union[str, Any] = 2 if text == '✅' or text == '❌' else len(_SCREAMING_SNAKE_CASE )
__A : Optional[Any] = (width - text_length) // 2
__A : int = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def _lowercase ( ) -> Optional[int]:
'''simple docstring'''
__A : Optional[int] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
__A : Any = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
__A : List[str] = {name: config.replace('Config' , '' ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
__A : Tuple = collections.defaultdict(_SCREAMING_SNAKE_CASE )
__A : List[str] = collections.defaultdict(_SCREAMING_SNAKE_CASE )
__A : List[Any] = collections.defaultdict(_SCREAMING_SNAKE_CASE )
__A : List[Any] = collections.defaultdict(_SCREAMING_SNAKE_CASE )
__A : Optional[int] = collections.defaultdict(_SCREAMING_SNAKE_CASE )
# Let's lookup through all transformers object (once).
for attr_name in dir(_SCREAMING_SNAKE_CASE ):
__A : List[Any] = None
if attr_name.endswith('Tokenizer' ):
__A : List[str] = slow_tokenizers
__A : Dict = attr_name[:-9]
elif attr_name.endswith('TokenizerFast' ):
__A : Dict = fast_tokenizers
__A : List[Any] = attr_name[:-13]
elif _re_tf_models.match(_SCREAMING_SNAKE_CASE ) is not None:
__A : int = tf_models
__A : Dict = _re_tf_models.match(_SCREAMING_SNAKE_CASE ).groups()[0]
elif _re_flax_models.match(_SCREAMING_SNAKE_CASE ) is not None:
__A : Tuple = flax_models
__A : Union[str, Any] = _re_flax_models.match(_SCREAMING_SNAKE_CASE ).groups()[0]
elif _re_pt_models.match(_SCREAMING_SNAKE_CASE ) is not None:
__A : Optional[int] = pt_models
__A : str = _re_pt_models.match(_SCREAMING_SNAKE_CASE ).groups()[0]
if lookup_dict is not None:
while len(_SCREAMING_SNAKE_CASE ) > 0:
if attr_name in model_name_to_prefix.values():
__A : int = True
break
# Try again after removing the last word in the name
__A : Any = ''.join(camel_case_split(_SCREAMING_SNAKE_CASE )[:-1] )
# Let's build that table!
__A : Optional[Any] = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
__A : Any = ['Model', 'Tokenizer slow', 'Tokenizer fast', 'PyTorch support', 'TensorFlow support', 'Flax Support']
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
__A : Tuple = [len(_SCREAMING_SNAKE_CASE ) + 2 for c in columns]
__A : int = max([len(_SCREAMING_SNAKE_CASE ) for name in model_names] ) + 2
# Build the table per se
__A : Any = '|' + '|'.join([_center_text(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for c, w in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )] ) + '|\n'
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([':' + '-' * (w - 2) + ':' for w in widths] ) + "|\n"
__A : int = {True: '✅', False: '❌'}
for name in model_names:
__A : str = model_name_to_prefix[name]
__A : List[Any] = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for l, w in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )] ) + "|\n"
return table
def _lowercase ( _SCREAMING_SNAKE_CASE : List[str]=False ) -> Any:
'''simple docstring'''
__A , __A , __A , __A : Tuple = _find_text_in_file(
filename=os.path.join(_SCREAMING_SNAKE_CASE , 'index.md' ) , start_prompt='<!--This table is updated automatically from the auto modules' , end_prompt='<!-- End table-->' , )
__A : Union[str, Any] = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(_SCREAMING_SNAKE_CASE , 'index.md' ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
'The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.' )
if __name__ == "__main__":
lowerCamelCase : Optional[int] =argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
lowerCamelCase : Optional[Any] =parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 237 | 0 |
'''simple docstring'''
__UpperCAmelCase = """0.18.2"""
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
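# Minimal standalone sketch of the optional-dependency guard repeated above
# (every name below is illustrative; the real module imports placeholder
# classes from its utils.dummy_* modules instead of assigning None):
class _DemoDependencyNotAvailable(Exception):
    pass
def _demo_is_foo_available() -> bool:
    try:
        import foo  # noqa: F401  (assumed third-party package, absent here)
        return True
    except ImportError:
        return False
try:
    if not _demo_is_foo_available():
        raise _DemoDependencyNotAvailable()
except _DemoDependencyNotAvailable:
    FooPipeline = None  # fall back when the optional dependency is missing
else:
    from foo import FooPipeline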
| 379 |
'''simple docstring'''
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
    SCREAMING_SNAKE_CASE : str = 1  # to keep the calculated value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
SCREAMING_SNAKE_CASE : Dict = n - k
# Calculate C(n,k)
for i in range(lowerCamelCase_ ):
result *= n - i
result //= i + 1
return result
def __A ( lowerCamelCase_ ):
"""simple docstring"""
return binomial_coefficient(2 * node_count , lowerCamelCase_ ) // (node_count + 1)
def __A ( lowerCamelCase_ ):
"""simple docstring"""
if n < 0:
raise ValueError("""factorial() not defined for negative values""" )
SCREAMING_SNAKE_CASE : Dict = 1
for i in range(1 , n + 1 ):
result *= i
return result
def __A ( lowerCamelCase_ ):
"""simple docstring"""
return catalan_number(lowerCamelCase_ ) * factorial(lowerCamelCase_ )
if __name__ == "__main__":
__UpperCAmelCase = int(input("""Enter the number of nodes: """).strip() or 0)
if node_count <= 0:
raise ValueError("""We need some nodes to work with.""")
print(
f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
f'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
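# Worked example for the functions above (restated with stdlib helpers; n = 3):
# C(6, 3) = 20, Catalan(3) = 20 // 4 = 5 binary search trees, and 5 * 3! = 30
# binary trees on 3 labeled nodes.
from math import comb, factorial as _fact
assert comb(6, 3) // 4 == 5 and 5 * _fact(3) == 30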
| 379 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase__ : str = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class _snake_case ( UpperCAmelCase_ , unittest.TestCase ):
__lowerCAmelCase : str = XLNetTokenizer
__lowerCAmelCase : List[Any] = XLNetTokenizerFast
__lowerCAmelCase : int = True
__lowerCAmelCase : str = True
def lowercase__ ( self):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ : List[str] = XLNetTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__)
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : List[Any] = """<s>"""
lowercase__ : List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__) , UpperCamelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__) , UpperCamelCase__)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Tuple = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , """<unk>""")
self.assertEqual(vocab_keys[1] , """<s>""")
self.assertEqual(vocab_keys[-1] , """<eod>""")
self.assertEqual(len(UpperCamelCase__) , 10_06)
def lowercase__ ( self):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_00)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[Any] = XLNetTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__)
lowercase__ : List[Any] = tokenizer.tokenize("""This is a test""")
self.assertListEqual(UpperCamelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__) , [2_85, 46, 10, 1_70, 3_82])
lowercase__ : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowercase__ : Optional[Any] = tokenizer.convert_tokens_to_ids(UpperCamelCase__)
self.assertListEqual(UpperCamelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4])
lowercase__ : Any = tokenizer.convert_ids_to_tokens(UpperCamelCase__)
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : List[Any] = XLNetTokenizer(UpperCamelCase__ , do_lower_case=UpperCamelCase__)
lowercase__ : List[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""▁he""", """ll""", """o"""])
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : str = XLNetTokenizer(UpperCamelCase__ , do_lower_case=UpperCamelCase__)
lowercase__ : Tuple = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
@slow
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Any = XLNetTokenizer.from_pretrained("""xlnet-base-cased""")
lowercase__ : str = tokenizer.encode("""sequence builders""" , add_special_tokens=UpperCamelCase__)
lowercase__ : str = tokenizer.encode("""multi-sequence build""" , add_special_tokens=UpperCamelCase__)
lowercase__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__)
lowercase__ : str = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__)
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Tuple = {"""input_ids""": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} 
# noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
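# Hedged sketch of the special-token layout the asserts above encode: XLNet
# places <sep> (id 4) and <cls> (id 3) at the end of the sequence, with an
# extra <sep> between the two segments of a pair (helpers below are
# illustrative, not the tokenizer API):
def _xlnet_single(ids):
    return ids + [4, 3]
def _xlnet_pair(ids_a, ids_b):
    return ids_a + [4] + ids_b + [4, 3]
assert _xlnet_single([7, 8]) == [7, 8, 4, 3]
assert _xlnet_pair([7], [9]) == [7, 4, 9, 4, 3]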
| 700 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class _snake_case :
def __init__( self , SCREAMING_SNAKE_CASE_ = None):
'''simple docstring'''
if components is None:
lowercase__ : List[str] = []
lowercase__ : Dict = list(SCREAMING_SNAKE_CASE_)
def __len__( self):
'''simple docstring'''
return len(self.__components)
def __str__( self):
'''simple docstring'''
return "(" + ",".join(map(SCREAMING_SNAKE_CASE_ , self.__components)) + ")"
def __add__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Optional[Any] = len(self)
if size == len(SCREAMING_SNAKE_CASE_):
lowercase__ : List[str] = [self.__components[i] + other.component(SCREAMING_SNAKE_CASE_) for i in range(SCREAMING_SNAKE_CASE_)]
return Vector(SCREAMING_SNAKE_CASE_)
else:
raise Exception("""must have the same size""")
def __sub__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : List[Any] = len(self)
if size == len(SCREAMING_SNAKE_CASE_):
lowercase__ : Optional[Any] = [self.__components[i] - other.component(SCREAMING_SNAKE_CASE_) for i in range(SCREAMING_SNAKE_CASE_)]
return Vector(SCREAMING_SNAKE_CASE_)
else: # error case
raise Exception("""must have the same size""")
@overload
def __mul__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
...
@overload
def __mul__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
...
def __mul__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , (float, int)):
lowercase__ : Optional[int] = [c * other for c in self.__components]
return Vector(SCREAMING_SNAKE_CASE_)
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and len(self) == len(SCREAMING_SNAKE_CASE_):
lowercase__ : Dict = len(self)
lowercase__ : Optional[Any] = [self.__components[i] * other.component(SCREAMING_SNAKE_CASE_) for i in range(SCREAMING_SNAKE_CASE_)]
return sum(SCREAMING_SNAKE_CASE_)
else: # error case
raise Exception("""invalid operand!""")
def lowercase__ ( self):
'''simple docstring'''
return Vector(self.__components)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and -len(self.__components) <= i < len(self.__components):
return self.__components[i]
else:
raise Exception("""index out of range""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
assert -len(self.__components) <= pos < len(self.__components)
lowercase__ : List[Any] = value
def lowercase__ ( self):
'''simple docstring'''
if len(self.__components) == 0:
raise Exception("""Vector is empty""")
lowercase__ : Union[str, Any] = [c**2 for c in self.__components]
return math.sqrt(sum(SCREAMING_SNAKE_CASE_))
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False):
'''simple docstring'''
lowercase__ : Union[str, Any] = self * other
lowercase__ : Optional[Any] = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den))
else:
return math.acos(num / den)
def UpperCamelCase ( lowercase_ ) -> Vector:
'''simple docstring'''
assert isinstance(lowercase_ , lowercase_ )
return Vector([0] * dimension )
def UpperCamelCase ( lowercase_ , lowercase_ ) -> Vector:
'''simple docstring'''
assert isinstance(lowercase_ , lowercase_ ) and (isinstance(lowercase_ , lowercase_ ))
lowercase__ : Union[str, Any] = [0] * dimension
lowercase__ : Any = 1
return Vector(lowercase_ )
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Vector:
'''simple docstring'''
assert (
isinstance(lowercase_ , lowercase_ )
and isinstance(lowercase_ , lowercase_ )
and (isinstance(lowercase_ , (int, float) ))
)
return x * scalar + y
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Vector:
'''simple docstring'''
random.seed(lowercase_ )
lowercase__ : int = [random.randint(lowercase_ , lowercase_ ) for _ in range(lowercase_ )]
return Vector(lowercase_ )
class _snake_case :
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : List[Any] = matrix
lowercase__ : Any = w
lowercase__ : Any = h
def __str__( self):
'''simple docstring'''
lowercase__ : str = """"""
for i in range(self.__height):
ans += "|"
for j in range(self.__width):
if j < self.__width - 1:
ans += str(self.__matrix[i][j]) + ","
else:
ans += str(self.__matrix[i][j]) + "|\n"
return ans
def __add__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
lowercase__ : Tuple = []
for i in range(self.__height):
lowercase__ : Tuple = [
self.__matrix[i][j] + other.component(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
for j in range(self.__width)
]
matrix.append(SCREAMING_SNAKE_CASE_)
return Matrix(SCREAMING_SNAKE_CASE_ , self.__width , self.__height)
else:
raise Exception("""matrix must have the same dimension!""")
def __sub__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
lowercase__ : Optional[int] = []
for i in range(self.__height):
lowercase__ : List[str] = [
self.__matrix[i][j] - other.component(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
for j in range(self.__width)
]
matrix.append(SCREAMING_SNAKE_CASE_)
return Matrix(SCREAMING_SNAKE_CASE_ , self.__width , self.__height)
else:
raise Exception("""matrices must have the same dimension!""")
@overload
def __mul__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
...
@overload
def __mul__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
...
def __mul__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): # matrix-vector
if len(SCREAMING_SNAKE_CASE_) == self.__width:
lowercase__ : List[Any] = zero_vector(self.__height)
for i in range(self.__height):
lowercase__ : Union[str, Any] = [
self.__matrix[i][j] * other.component(SCREAMING_SNAKE_CASE_)
for j in range(self.__width)
]
ans.change_component(SCREAMING_SNAKE_CASE_ , sum(SCREAMING_SNAKE_CASE_))
return ans
else:
raise Exception(
"""vector must have the same size as the """
"""number of columns of the matrix!""")
elif isinstance(SCREAMING_SNAKE_CASE_ , (int, float)): # matrix-scalar
lowercase__ : Tuple = [
[self.__matrix[i][j] * other for j in range(self.__width)]
for i in range(self.__height)
]
return Matrix(SCREAMING_SNAKE_CASE_ , self.__width , self.__height)
return None
def lowercase__ ( self):
'''simple docstring'''
return self.__height
def lowercase__ ( self):
'''simple docstring'''
return self.__width
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("""change_component: indices out of bounds""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
lowercase__ : Tuple = value
else:
raise Exception("""change_component: indices out of bounds""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""")
lowercase__ : List[Any] = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(SCREAMING_SNAKE_CASE_)):
lowercase__ : List[str] = minor[i][:y] + minor[i][y + 1 :]
return Matrix(SCREAMING_SNAKE_CASE_ , self.__width - 1 , self.__height - 1).determinant()
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""")
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
else:
raise Exception("""Indices out of bounds""")
def lowercase__ ( self):
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""")
if self.__height < 1:
raise Exception("""Matrix has no element""")
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
lowercase__ : Optional[int] = [
self.__matrix[0][y] * self.cofactor(0 , SCREAMING_SNAKE_CASE_) for y in range(self.__width)
]
return sum(SCREAMING_SNAKE_CASE_)
def UpperCamelCase ( lowercase_ ) -> Matrix:
'''simple docstring'''
lowercase__ : list[list[float]] = [[0] * n for _ in range(lowercase_ )]
return Matrix(lowercase_ , lowercase_ , lowercase_ )
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Matrix:
'''simple docstring'''
random.seed(lowercase_ )
lowercase__ : list[list[float]] = [
[random.randint(lowercase_ , lowercase_ ) for _ in range(lowercase_ )] for _ in range(lowercase_ )
]
return Matrix(lowercase_ , lowercase_ , lowercase_ )
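# Standalone numeric sketch of the two core operations modeled above, using
# plain lists (the classes' original names `Vector`/`Matrix` were lost in the
# renaming, so these helpers are my own labels):
def _dot(u: list[float], v: list[float]) -> float:
    assert len(u) == len(v), "must have the same size"
    return sum(a * b for a, b in zip(u, v))
def _mat_vec(m: list[list[float]], v: list[float]) -> list[float]:
    return [_dot(row, v) for row in m]
assert _dot([1.0, 2.0, 3.0], [4.0, 5.0, 6.0]) == 32.0
assert _mat_vec([[1.0, 0.0], [0.0, 2.0]], [3.0, 4.0]) == [3.0, 8.0]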
| 495 | 0 |
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''' , [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
] , )
def UpperCamelCase ( lowercase_ : Any , lowercase_ : int ) -> int:
'''simple docstring'''
lowercase =tmp_path_factory.mktemp('''dset_infos_dir''' )
if "full:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
f.write('''---\ndataset_info:\n dataset_size: 42\n---''' )
if "empty:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
f.write('''''' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / '''dataset_infos.json''' , '''w''' ) as f:
f.write('''{"default": {"dataset_size": 42}}''' )
lowercase =DatasetInfosDict.from_directory(lowercase_ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 4_2
@pytest.mark.parametrize(
'''dataset_info''' , [
DatasetInfo(),
DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=4_2 , ),
] , )
def UpperCamelCase ( lowercase_ : Tuple , lowercase_ : DatasetInfo ) -> List[str]:
'''simple docstring'''
lowercase =str(lowercase_ )
dataset_info.write_to_directory(lowercase_ )
lowercase =DatasetInfo.from_directory(lowercase_ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(lowercase_ , '''dataset_info.json''' ) )
def UpperCamelCase ( ) -> str:
'''simple docstring'''
lowercase =DatasetInfo(
description='''foo''' , citation='''bar''' , homepage='''https://foo.bar''' , license='''CC0''' , features=Features({'''a''': Value('''int32''' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train''', '''num_examples''': 4_2}] , download_checksums={} , download_size=1_3_3_7 , post_processing_size=4_4_2 , dataset_size=1_2_3_4 , size_in_bytes=1_3_3_7 + 4_4_2 + 1_2_3_4 , )
lowercase =dataset_info._to_yaml_dict()
assert sorted(lowercase_ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
lowercase =yaml.safe_dump(lowercase_ )
lowercase =yaml.safe_load(lowercase_ )
assert dataset_info_yaml_dict == reloaded
def UpperCamelCase ( ) -> List[str]:
'''simple docstring'''
lowercase =DatasetInfo()
lowercase =dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'''dataset_infos_dict''' , [
DatasetInfosDict(),
DatasetInfosDict({'''default''': DatasetInfo()} ),
DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
DatasetInfosDict(
{
'''default''': DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=4_2 , )
} ),
DatasetInfosDict(
{
'''v1''': DatasetInfo(dataset_size=4_2 ),
'''v2''': DatasetInfo(dataset_size=1_3_3_7 ),
} ),
] , )
def UpperCamelCase ( lowercase_ : List[Any] , lowercase_ : DatasetInfosDict ) -> Optional[int]:
'''simple docstring'''
lowercase =str(lowercase_ )
dataset_infos_dict.write_to_directory(lowercase_ )
lowercase =DatasetInfosDict.from_directory(lowercase_ )
    # the config_name of the dataset_infos_dict takes over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
lowercase =config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
lowercase =DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(lowercase_ , '''README.md''' ) )
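# Tiny standalone illustration of the yaml round-trip the tests above depend on:
_payload = {"dataset_size": 42, "splits": [{"name": "train"}]}
assert yaml.safe_load(yaml.safe_dump(_payload)) == _payload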
| 72 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = '''umt5'''
__UpperCamelCase = ['''past_key_values''']
    def __init__(self, vocab_size=250_112, d_model=512, d_kv=64, d_ff=1_024, num_layers=8, num_decoder_layers=None, num_heads=6, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, feed_forward_proj="gated-gelu", is_encoder_decoder=True, use_cache=True, tokenizer_class="T5Tokenizer", tie_word_embeddings=True, pad_token_id=0, eos_token_id=1, decoder_start_token_id=0, **kwargs):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder, tokenizer_class=tokenizer_class, tie_word_embeddings=tie_word_embeddings, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'")
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
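        # e.g. feed_forward_proj="gated-gelu" -> is_gated_act=True, dense_act_fn="gelu_new";
        #      feed_forward_proj="relu"       -> is_gated_act=False, dense_act_fn="relu".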
    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs
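    # Note: fill_with_past_key_values_ (behaviour assumed to be inherited from
    # OnnxSeq2SeqConfigWithPast) adds one
    # "past_key_values.{i}.{decoder,encoder}.{key,value}" dynamic-axes entry per layer.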
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
| 454 | 0 |
"""simple docstring"""
import socket
def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12_312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(10_24)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")
if __name__ == "__main__":
main()
| 704 |
"""simple docstring"""
__all__ = [
    "DownloadConfig",
    "DownloadManager",
    "DownloadMode",
    "StreamingDownloadManager",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
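
# Hedged usage sketch, not part of the original module; the URL is a placeholder:
# from datasets.download import DownloadConfig, DownloadManager
# dl_manager = DownloadManager(download_config=DownloadConfig())
# local_path = dl_manager.download("https://example.com/data.csv")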
| 292 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1_024,
    "facebook/nllb-200-distilled-600M": 1_024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
# fmt: on
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens])
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        # generation is forced to start with the target language code
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "eng_Latn", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "fra_Latn", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
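    # e.g. with src_lang="eng_Latn" and legacy_behaviour=False the template is
    # "eng_Latn <tokens> </s>"; with legacy_behaviour=True it is "<tokens> </s> eng_Latn".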
    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 453 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTVaModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, hidden_act="swish", conv_kernel_size=3, output_stride=32, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, width_multiplier=0.25, ffn_dropout=0.0, attn_dropout=0.0):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        # make_divisible rounds 512 * width_multiplier to the nearest multiple of 8 (channel alignment)
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout_prob, attn_dropout=self.attn_dropout_prob, )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViTV2 does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="MobileViTV2 does not support input and output embeddings" )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="MobileViTV2 does not output attentions" )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
pass
    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)
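            # e.g. with the tester defaults (image_size=64): five maps of spatial size
            # 32, 16, 8, 4, 2; the loop leaves divisor=64, so output_stride == 64 // 2 == 32.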
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]],
                [[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]],
                [[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]],
            ], device=torch_device, )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 453 | 1 |
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name: str) -> str:
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
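
# Note on the fused split above: Audiocraft stores each attention's q/k/v
# projections as a single "in_proj_weight" of shape (3 * hidden_size, hidden_size);
# slicing it into thirds recovers the separate q_proj / k_proj / v_proj weights
# the transformers implementation expects.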
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1_024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1_536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2_048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size, ffn_dim=hidden_size * 4, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size)

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2_048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2_048
    model.generation_config.pad_token_id = 2_048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
| 707 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class WavaVecaProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 1_60_00,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        processor.save_pretrained(self.tmpdirname)
        processor = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, WavaVecaCTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, WavaVecaFeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set, decoder.model_container[decoder._model_key]._unigram_set, )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)
    def test_save_load_pretrained_additional_features(self):
        processor = WavaVecaProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder())
        processor.save_pretrained(self.tmpdirname)

        processor = WavaVecaProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3)

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)
    def test_load_decoder_tokenizer_mismatch_in_init(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            WavaVecaProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder())
    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 10_00))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)
    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)
        decoded_processor = processor.decode(logits)
        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)
    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)
    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0

        decoded_processor_out = processor.batch_decode(
            logits, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp, )
        decoded_processor_text = decoded_processor_out.text

        logits_list = list(logits)

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool, logits_list, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp, )

        decoded_decoder_text = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(decoded_processor_text, decoded_decoder_text)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor_text)

        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1E-3))

        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9_474], lm_scores, atol=1E-3))
    def test_decoder_with_params_of_lm(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True

        decoded_processor_out = processor.batch_decode(
            logits, alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary, )
        decoded_processor_text = decoded_processor_out.text

        logits_list = list(logits)
        decoder.reset_params(
            alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary, )

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool, logits_list, )
        decoded_decoder_text = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(decoded_processor_text, decoded_decoder_text)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor_text)

        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, True)
    def test_decoder_download_ignores_files(self):
        processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]

        downloaded_decoder_files.sort()
        expected_decoder_files.sort()

        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)
    def test_decoder_local_files(self):
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm")
        processor = WavaVecaProcessorWithLM.from_pretrained(local_dir)

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)

        local_decoder_files.sort()
        expected_decoder_files.sort()

        # test that both the decoder from the hub and the local files in cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)
    def test_processor_from_auto_processor(self):
        processor_wavaveca = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")

        raw_speech = floats_list((3, 10_00))

        input_wavaveca = processor_wavaveca(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")

        for key in input_wavaveca.keys():
            self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1E-2)

        logits = self._get_dummy_logits()

        decoded_wavaveca = processor_wavaveca.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)

        self.assertListEqual(decoded_wavaveca.text, decoded_auto.text)
    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        self.assertListEqual(
            processor.model_input_names, feature_extractor.model_input_names, msg="`processor` and `feature_extractor` model input names do not match", )
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
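
    # e.g. get_from_offsets([{"word": "a", "start_offset": 0}], "word") -> ["a"]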
    def test_offsets_integration_fast(self):
        processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]
        outputs = processor.decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, WavaVecaDecoderWithLMOutput))

        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])
    def test_offsets_integration_fast_batch(self):
        processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()
        outputs = processor.batch_decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, WavaVecaDecoderWithLMOutput))

        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])
    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=1_60_00))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

        # fmt: off
        expected_start_tensor = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599])
        expected_end_tensor = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
| 210 | 0 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        # 5-way classification head over the pooled output (answer category)
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
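
# The model is trained jointly on three objectives (answer start index, answer end
# index, and the 5-way answer category from the pooled output); averaging the three
# cross-entropies above weights them equally.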
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 30_00
    save_steps: int = 1_05_00

    block_size: int = 1_28
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 2_00_00
    weight_decay: float = 0.0_0_9_5

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 40_96  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
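
    # e.g. with pad_id=0 and max_length=6, _fetch_inputs([5, 7]) returns
    # ([5, 7, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0]): right-padding plus its attention mask.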
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels, )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__, params=params, tx=tx, loss_fn=calculate_loss_for_nq, )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step, apply_fn=model.__call__, params=params, tx=tx, opt_state=opt_state, )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
    def evaluate( self , state , dataset ):
        dataloader = get_batched_dataset(dataset , self.args.batch_size )
        total = len(dataset ) // self.args.batch_size
        running_loss = jnp.array(0 , dtype=jnp.float32 )
        i = 0
        for batch in tqdm(dataloader , total=total , desc="Evaluating ... " ):
            batch = self.data_collator(batch )
            metrics = self.val_step_fn(state , **batch )
            running_loss += jax_utils.unreplicate(metrics["loss"] )
            i += 1
        return running_loss / i
    def save_checkpoint( self , save_dir , state ):
        state = jax_utils.unreplicate(state )
        print(F"""SAVING CHECKPOINT IN {save_dir}""" , end=" ... " )
        self.model_save_fn(save_dir , params=state.params )
        with open(os.path.join(save_dir , "opt_state.msgpack" ) , "wb" ) as f:
            f.write(to_bytes(state.opt_state ) )
        joblib.dump(self.args , os.path.join(save_dir , "args.joblib" ) )
        joblib.dump(self.data_collator , os.path.join(save_dir , "data_collator.joblib" ) )
        with open(os.path.join(save_dir , "training_state.json" ) , "w" ) as f:
            json.dump({"step": state.step.item()} , f )
        print("DONE" )
def restore_checkpoint( save_dir , state ):
    print(F"""RESTORING CHECKPOINT FROM {save_dir}""" , end=" ... " )
    with open(os.path.join(save_dir , "flax_model.msgpack" ) , "rb" ) as f:
        params = from_bytes(state.params , f.read() )
    with open(os.path.join(save_dir , "opt_state.msgpack" ) , "rb" ) as f:
        opt_state = from_bytes(state.opt_state , f.read() )
    args = joblib.load(os.path.join(save_dir , "args.joblib" ) )
    data_collator = joblib.load(os.path.join(save_dir , "data_collator.joblib" ) )
    with open(os.path.join(save_dir , "training_state.json" ) , "r" ) as f:
        training_state = json.load(f )
    step = training_state["step"]
    print("DONE" )
    return params, opt_state, step, args, data_collator
def scheduler_fn( lr , init_lr , warmup_steps , num_train_steps ):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr , end_value=lr , transition_steps=warmup_steps )
    decay_fn = optax.linear_schedule(init_value=lr , end_value=1E-7 , transition_steps=decay_steps )
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
    return lr
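# Worked example (hedged, toy numbers): with init_lr=0.0, lr=3e-5, warmup_steps=100
# and num_train_steps=1000, the joined schedule ramps linearly from 0 to 3e-5 over
# the first 100 steps, then decays linearly toward 1e-7 over the remaining 900:
#
#     sched = scheduler_fn(lr=3e-5, init_lr=0.0, warmup_steps=100, num_train_steps=1000)
#     print(sched(0), sched(100), sched(999))  # ~0.0, 3e-5, ~1e-7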
def build_tx( lr , init_lr , warmup_steps , num_train_steps , weight_decay ):
    def weight_decay_mask(params ):
        params = traverse_util.flatten_dict(params )
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask )
    lr = scheduler_fn(lr , init_lr , warmup_steps , num_train_steps )
    tx = optax.adamw(learning_rate=lr , weight_decay=weight_decay , mask=weight_decay_mask )
    return tx, lr
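# Hedged demo of the weight-decay mask built in build_tx above: biases and
# LayerNorm scales are excluded from decay. The parameter names below are
# illustrative, not taken from a real model:
if __name__ == "__main__":
    demo_params = {"dense": {"kernel": 1.0, "bias": 0.0}, "LayerNorm": {"scale": 1.0}}
    demo_flat = traverse_util.flatten_dict(demo_params)
    demo_mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in demo_flat}
    print(traverse_util.unflatten_dict(demo_mask))
    # {'dense': {'kernel': True, 'bias': False}, 'LayerNorm': {'scale': False}}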
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")
class Node(Generic[T] ):
    def __init__( self , data: T ):
        self.data = data
        self.next = None
    def __str__( self ):
        return F"""{self.data}"""
class Stack(Generic[T] ):
    def __init__( self ):
        self.top = None
    def __iter__( self ) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next
    def __str__( self ):
        return "->".join([str(item ) for item in self] )
    def __len__( self ):
        return len(tuple(iter(self ) ) )
    def is_empty( self ):
        return self.top is None
    def push( self , item: T ):
        node = Node(item )
        if not self.is_empty():
            node.next = self.top
        self.top = node
    def pop( self ):
        if self.is_empty():
            raise IndexError("pop from empty stack" )
        assert isinstance(self.top , Node )
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data
    def peek( self ):
        if self.is_empty():
            raise IndexError("peek from empty stack" )
        assert self.top is not None
        return self.top.data
    def clear( self ):
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
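    # Hedged usage sketch of the stack above:
    demo = Stack[int]()
    demo.push(1)
    demo.push(2)
    assert str(demo) == "2->1"
    assert demo.pop() == 2 and demo.peek() == 1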
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head #
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[layer] = state_dict[layer]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")
    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
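    # Hedged mini-example of the layer-selection idea above: copy alternating
    # teacher layers into a contiguous student state dict (toy tensors and
    # illustrative key names, not the real checkpoint):
    toy_teacher = {f"encoder.layer.{i}.weight": torch.randn(2, 2) for i in range(12)}
    toy_student = {}
    for toy_std_idx, toy_teacher_idx in enumerate([0, 2, 4, 7, 9, 11]):
        toy_student[f"encoder.layer.{toy_std_idx}.weight"] = toy_teacher[f"encoder.layer.{toy_teacher_idx}.weight"]
    print(f"toy example keeps {len(toy_student)} of {len(toy_teacher)} layers")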
def solution( n : int = 100 ) -> int:
    '''Difference between the square of the sum and the sum of the squares of the first n natural numbers.'''
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'{solution() = }')
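    # Quick sanity check (hedged, standard identities): the square of the sum is
    # (n(n+1)/2)^2 and the sum of squares is n(n+1)(2n+1)/6; for n = 10 the
    # difference is 3025 - 385 = 2640, matching the brute force:
    assert solution(10) == sum(range(1, 11)) ** 2 - sum(i * i for i in range(1, 11)) == 2640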
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"AI-Sweden/gpt-sw3-126m": 2_048,
"AI-Sweden/gpt-sw3-350m": 2_048,
"AI-Sweden/gpt-sw3-1.6b": 2_048,
"AI-Sweden/gpt-sw3-6.7b": 2_048,
"AI-Sweden/gpt-sw3-20b": 2_048,
}
class GPTSw3Tokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , do_lower_case=False , remove_space=False , keep_accents=False , pad_token=None , unk_token=None , eos_token=None , bos_token=None , sp_model_kwargs = None , **kwargs , ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get("name_or_path" )
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored" )
            name_or_path = "None"
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f'[{"".join(map(chr , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]' )
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size( self ):
        return len(self.sp_model )
    def preprocess_text( self , text ):
        text = self.non_printing_characters_re.sub("" , text )
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text] )
        # NFC Unicode normalization
        text = unicodedata.normalize("NFC" , text )
        return text
    def _tokenize( self , text , **kwargs ):
        text = self.preprocess_text(text )
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        return self.sp_model.IdToPiece(index )
    @staticmethod
    def clean_up_tokenization( out_string ):
        return out_string
    def convert_tokens_to_string( self , tokens ):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def encode_fast( self , text , return_tensors = False ):
        if isinstance(text , str ):
            text = self.preprocess_text(text )
            token_ids = self.sp_model.encode(text )
        else:
            text = [self.preprocess_text(t ) for t in text]
            token_ids = self.sp_model.encode(text )
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids )
        return token_ids
    def decode_fast( self , token_ids ):
        return self.sp_model.decode(token_ids )
    def _build_conversation_input_ids( self , conversation ):
        all_responses = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()]
        prompt = (
            F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(all_responses ) + F'{self.bos_token}Bot:'
        )
        return self.encode(text=prompt )
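# Hedged illustration of the Unicode cleanup performed by preprocess_text above:
# strip the same non-printing/control character ranges as the regex in __init__,
# then apply NFC normalization:
if __name__ == "__main__":
    demo_re = re.compile(
        "[%s]" % "".join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))
    )
    demo_text = "caf\u0065\u0301\u200b"  # 'caf' + 'e' + combining acute + zero-width space
    print(unicodedata.normalize("NFC", demo_re.sub("", demo_text)))  # -> café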
"""simple docstring"""
from typing import Any
class Node :
    def __init__( self , data: Any):
        self.data = data
        self.next = None
class LinkedList :
    def __init__( self ):
        self.head = None
    def print_list( self ):
        temp = self.head
        while temp is not None:
            print(temp.data , end=" ")
            temp = temp.next
        print()
    def push( self , new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node
    def swap_nodes( self , node_data_a: Any , node_data_b: Any):
        if node_data_a == node_data_b:
            return
        node_a = self.head
        while node_a is not None and node_a.data != node_data_a:
            node_a = node_a.next
        node_b = self.head
        while node_b is not None and node_b.data != node_data_b:
            node_b = node_b.next
        if node_a is None or node_b is None:
            return
        node_a.data , node_b.data = node_b.data , node_a.data
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("""After swapping""")
ll.print_list()
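    # Hedged intuition: swap_nodes exchanges the *data* payloads rather than
    # re-linking nodes (two O(n) searches plus an O(1) swap). The same effect on
    # a plain Python list, as an illustrative mirror:
    values = [1, 2, 3, 4, 5]
    i, j = values.index(1), values.index(4)
    values[i], values[j] = values[j], values[i]
    print("list analogue:", values)  # [4, 2, 3, 1, 5]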
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__lowerCAmelCase : Dict = logging.get_logger(__name__)
enable_full_determinism()
class UNet2DModelTest ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    model_class = UNetaDModel
    main_input_name = 'sample'
@property
def _UpperCamelCase ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
a__ = 4
a__ = 3
a__ = (32, 32)
a__ = floats_tensor((batch_size, num_channels) + sizes ).to(__magic_name__ )
a__ = torch.tensor([10] ).to(__magic_name__ )
return {"sample": noise, "timestep": time_step}
@property
def _UpperCamelCase ( self :List[str] ) -> Tuple:
'''simple docstring'''
return (3, 32, 32)
@property
def _UpperCamelCase ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return (3, 32, 32)
def _UpperCamelCase ( self :Dict ) -> Optional[int]:
'''simple docstring'''
a__ = {
'''block_out_channels''': (32, 64),
'''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
'''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
'''attention_head_dim''': 3,
'''out_channels''': 3,
'''in_channels''': 3,
'''layers_per_block''': 2,
'''sample_size''': 32,
}
a__ = self.dummy_input
return init_dict, inputs_dict
class UNetLDMModelTest ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    model_class = UNetaDModel
    main_input_name = 'sample'
@property
def _UpperCamelCase ( self :List[str] ) -> Optional[Any]:
'''simple docstring'''
a__ = 4
a__ = 4
a__ = (32, 32)
a__ = floats_tensor((batch_size, num_channels) + sizes ).to(__magic_name__ )
a__ = torch.tensor([10] ).to(__magic_name__ )
return {"sample": noise, "timestep": time_step}
@property
def _UpperCamelCase ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
return (4, 32, 32)
@property
def _UpperCamelCase ( self :Optional[Any] ) -> int:
'''simple docstring'''
return (4, 32, 32)
def _UpperCamelCase ( self :Dict ) -> Dict:
'''simple docstring'''
a__ = {
'''sample_size''': 32,
'''in_channels''': 4,
'''out_channels''': 4,
'''layers_per_block''': 2,
'''block_out_channels''': (32, 64),
'''attention_head_dim''': 32,
'''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
'''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
}
a__ = self.dummy_input
return init_dict, inputs_dict
def _UpperCamelCase ( self :List[str] ) -> Tuple:
'''simple docstring'''
a__ , a__ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=__magic_name__ )
self.assertIsNotNone(__magic_name__ )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(__magic_name__ )
a__ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def _UpperCamelCase ( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
a__ , a__ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=__magic_name__ )
model.to(__magic_name__ )
a__ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def _UpperCamelCase ( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
a__ , a__ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=__magic_name__ )
model_accelerate.to(__magic_name__ )
model_accelerate.eval()
a__ = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
a__ = noise.to(__magic_name__ )
a__ = torch.tensor([10] * noise.shape[0] ).to(__magic_name__ )
a__ = model_accelerate(__magic_name__ , __magic_name__ )['''sample''']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
a__ , a__ = UNetaDModel.from_pretrained(
'''fusing/unet-ldm-dummy-update''' , output_loading_info=__magic_name__ , low_cpu_mem_usage=__magic_name__ )
model_normal_load.to(__magic_name__ )
model_normal_load.eval()
a__ = model_normal_load(__magic_name__ , __magic_name__ )['''sample''']
assert torch_all_close(__magic_name__ , __magic_name__ , rtol=1e-3 )
def _UpperCamelCase ( self :Optional[Any] ) -> str:
'''simple docstring'''
a__ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
model.eval()
model.to(__magic_name__ )
a__ = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
a__ = noise.to(__magic_name__ )
a__ = torch.tensor([10] * noise.shape[0] ).to(__magic_name__ )
with torch.no_grad():
a__ = model(__magic_name__ , __magic_name__ ).sample
a__ = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
a__ = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] )
# fmt: on
self.assertTrue(torch_all_close(__magic_name__ , __magic_name__ , rtol=1e-3 ) )
class NCSNppModelTests ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    model_class = UNetaDModel
    main_input_name = 'sample'
@property
def _UpperCamelCase ( self :Dict , __magic_name__ :Any=(32, 32) ) -> Optional[int]:
'''simple docstring'''
a__ = 4
a__ = 3
a__ = floats_tensor((batch_size, num_channels) + sizes ).to(__magic_name__ )
a__ = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=__magic_name__ )
return {"sample": noise, "timestep": time_step}
@property
def _UpperCamelCase ( self :int ) -> Tuple:
'''simple docstring'''
return (3, 32, 32)
@property
def _UpperCamelCase ( self :int ) -> str:
'''simple docstring'''
return (3, 32, 32)
def _UpperCamelCase ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
a__ = {
'''block_out_channels''': [32, 64, 64, 64],
'''in_channels''': 3,
'''layers_per_block''': 1,
'''out_channels''': 3,
'''time_embedding_type''': '''fourier''',
'''norm_eps''': 1e-6,
'''mid_block_scale_factor''': math.sqrt(2.0 ),
'''norm_num_groups''': None,
'''down_block_types''': [
'''SkipDownBlock2D''',
'''AttnSkipDownBlock2D''',
'''SkipDownBlock2D''',
'''SkipDownBlock2D''',
],
'''up_block_types''': [
'''SkipUpBlock2D''',
'''SkipUpBlock2D''',
'''AttnSkipUpBlock2D''',
'''SkipUpBlock2D''',
],
}
a__ = self.dummy_input
return init_dict, inputs_dict
@slow
def _UpperCamelCase ( self :str ) -> List[Any]:
'''simple docstring'''
a__ , a__ = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=__magic_name__ )
self.assertIsNotNone(__magic_name__ )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(__magic_name__ )
a__ = self.dummy_input
a__ = floats_tensor((4, 3) + (256, 256) ).to(__magic_name__ )
a__ = noise
a__ = model(**__magic_name__ )
assert image is not None, "Make sure output is not None"
@slow
def _UpperCamelCase ( self :int ) -> int:
'''simple docstring'''
a__ = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
model.to(__magic_name__ )
a__ = 4
a__ = 3
a__ = (256, 256)
a__ = torch.ones((batch_size, num_channels) + sizes ).to(__magic_name__ )
a__ = torch.tensor(batch_size * [1e-4] ).to(__magic_name__ )
with torch.no_grad():
a__ = model(__magic_name__ , __magic_name__ ).sample
a__ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
a__ = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] )
# fmt: on
self.assertTrue(torch_all_close(__magic_name__ , __magic_name__ , rtol=1e-2 ) )
def _UpperCamelCase ( self :List[str] ) -> str:
'''simple docstring'''
a__ = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
model.to(__magic_name__ )
a__ = 4
a__ = 3
a__ = (32, 32)
a__ = torch.ones((batch_size, num_channels) + sizes ).to(__magic_name__ )
a__ = torch.tensor(batch_size * [1e-4] ).to(__magic_name__ )
with torch.no_grad():
a__ = model(__magic_name__ , __magic_name__ ).sample
a__ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
a__ = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] )
# fmt: on
self.assertTrue(torch_all_close(__magic_name__ , __magic_name__ , rtol=1e-2 ) )
def _UpperCamelCase ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
pass
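# Hedged minimal usage sketch mirroring the toy configs in the tests above. Note:
# in the released diffusers API the class is spelled UNet2DModel; sizes are toy:
if __name__ == "__main__":
    import torch
    from diffusers import UNet2DModel
    demo_unet = UNet2DModel(
        sample_size=32,
        in_channels=3,
        out_channels=3,
        layers_per_block=1,
        block_out_channels=(32, 64),
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    )
    demo_out = demo_unet(torch.randn(1, 3, 32, 32), torch.tensor([10])).sample
    print(demo_out.shape)  # torch.Size([1, 3, 32, 32])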
"""simple docstring"""
from __future__ import annotations
def make_matrix( row_size: int = 4 ) -> list[list[int]]:
    row_size = abs(row_size ) or 4
    return [[1 + x + y * row_size for x in range(row_size )] for y in range(row_size )]
def rotate_90( matrix: list[list[int]] ) -> list[list[int]]:
    return reverse_row(transpose(matrix ) )
    # OR.. transpose(reverse_column(matrix))
def rotate_180( matrix: list[list[int]] ) -> list[list[int]]:
    return reverse_row(reverse_column(matrix ) )
    # OR.. reverse_column(reverse_row(matrix))
def rotate_270( matrix: list[list[int]] ) -> list[list[int]]:
    return reverse_column(transpose(matrix ) )
    # OR.. transpose(reverse_row(matrix))
def transpose( matrix: list[list[int]] ) -> list[list[int]]:
    matrix = [list(x ) for x in zip(*matrix )]
    return matrix
def reverse_row( matrix: list[list[int]] ) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix
def reverse_column( matrix: list[list[int]] ) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix
def print_matrix( matrix: list[list[int]] ) -> None:
    for row in matrix:
        print(*row )
if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
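    # Quick check (hedged arithmetic): a single counterclockwise 90 degree turn
    # sends the first row [1, 2, 3, 4] of the default matrix to the first column
    # read bottom-to-top:
    matrix = make_matrix()
    assert [row[0] for row in rotate_90(matrix)][::-1] == [1, 2, 3, 4]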
'''simple docstring'''
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule( FlaxBigBirdForQuestionAnsweringModule ):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True
    def setup( self ):
        super().setup()
        self.cls = nn.Dense(5 , dtype=self.dtype )
    def __call__( self , *args , **kwargs ):
        outputs = super().__call__(*args , **kwargs )
        cls_out = self.cls(outputs[2] )
        return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions( FlaxBigBirdForQuestionAnswering ):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq( start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels ):
    def cross_entropy(logits , labels , reduction=None ):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size )[None]).astype("f4" )
        logits = jax.nn.log_softmax(logits , axis=-1 )
        loss = -jnp.sum(labels * logits , axis=-1 )
        if reduction is not None:
            loss = reduction(loss )
        return loss
    cross_entropy = partial(cross_entropy , reduction=jnp.mean )
    start_loss = cross_entropy(start_logits , start_labels )
    end_loss = cross_entropy(end_logits , end_labels )
    pooled_loss = cross_entropy(pooled_logits , pooled_labels )
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500
    block_size: int = 128
    num_random_blocks: int = 3
    batch_size_per_device: int = 1
    max_epochs: int = 5
    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095
    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"
    def __post_init__( self ):
        os.makedirs(self.base_dir , exist_ok=True )
        self.save_dir = os.path.join(self.base_dir , self.save_dir )
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs
    def __call__( self , batch ):
        batch = self.collate_fn(batch )
        batch = jax.tree_util.tree_map(shard , batch )
        return batch
    def collate_fn( self , features ):
        input_ids , attention_mask = self.fetch_inputs(features["input_ids"] )
        batch = {
            "input_ids": jnp.array(input_ids , dtype=jnp.int32 ),
            "attention_mask": jnp.array(attention_mask , dtype=jnp.int32 ),
            "start_labels": jnp.array(features["start_token"] , dtype=jnp.int32 ),
            "end_labels": jnp.array(features["end_token"] , dtype=jnp.int32 ),
            "pooled_labels": jnp.array(features["category"] , dtype=jnp.int32 ),
        }
        return batch
    def fetch_inputs( self , input_ids ):
        inputs = [self._fetch_inputs(ids ) for ids in input_ids]
        return zip(*inputs )
    def _fetch_inputs( self , input_ids ):
        attention_mask = [1 for _ in range(len(input_ids ) )]
        while len(input_ids ) < self.max_length:
            input_ids.append(self.pad_id )
            attention_mask.append(0 )
        return input_ids, attention_mask
def get_batched_dataset( dataset , batch_size , seed=None ):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed )
    for i in range(len(dataset ) // batch_size ):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch )
@partial(jax.pmap , axis_name="batch" )
def train_step( state , drp_rng , **model_inputs ):
    def loss_fn(params ):
        start_labels = model_inputs.pop("start_labels" )
        end_labels = model_inputs.pop("end_labels" )
        pooled_labels = model_inputs.pop("pooled_labels" )
        outputs = state.apply_fn(**model_inputs , params=params , dropout_rng=drp_rng , train=True )
        start_logits , end_logits , pooled_logits = outputs
        return state.loss_fn(
            start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels , )
    drp_rng , new_drp_rng = jax.random.split(drp_rng )
    grad_fn = jax.value_and_grad(loss_fn )
    loss , grads = grad_fn(state.params )
    metrics = jax.lax.pmean({"loss": loss} , axis_name="batch" )
    grads = jax.lax.pmean(grads , "batch" )
    state = state.apply_gradients(grads=grads )
    return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="batch" )
def val_step( state , **model_inputs ):
    start_labels = model_inputs.pop("start_labels" )
    end_labels = model_inputs.pop("end_labels" )
    pooled_labels = model_inputs.pop("pooled_labels" )
    outputs = state.apply_fn(**model_inputs , params=state.params , train=False )
    start_logits , end_logits , pooled_logits = outputs
    loss = state.loss_fn(start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels )
    metrics = jax.lax.pmean({"loss": loss} , axis_name="batch" )
    return metrics
class TrainState( train_state.TrainState ):
    loss_fn: Callable = struct.field(pytree_node=False )
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    scheduler_fn: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    logger: wandb
    model_save_fn: Callable = None
    def create_state( self , model , tx , num_train_steps , ckpt_dir=None ):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__ , params=params , tx=tx , loss_fn=calculate_loss_for_nq , )
        if ckpt_dir is not None:
            params , opt_state , step , args , data_collator = restore_checkpoint(ckpt_dir , state )
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx , lr = build_tx(**tx_args )
            state = train_state.TrainState(
                step=step , apply_fn=model.__call__ , params=params , tx=tx , opt_state=opt_state , )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state )
        return state
    def train( self , state , tr_dataset , val_dataset ):
        args = self.args
        total = len(tr_dataset ) // args.batch_size
        rng = jax.random.PRNGKey(0 )
        drp_rng = jax.random.split(rng , jax.device_count() )
        for epoch in range(args.max_epochs ):
            running_loss = jnp.array(0 , dtype=jnp.float32 )
            tr_dataloader = get_batched_dataset(tr_dataset , args.batch_size , seed=epoch )
            i = 0
            for batch in tqdm(tr_dataloader , total=total , desc=f"Running EPOCH-{epoch}" ):
                batch = self.data_collator(batch )
                state , metrics , drp_rng = self.train_step_fn(state , drp_rng , **batch )
                running_loss += jax_utils.unreplicate(metrics["loss"] )
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step )
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1 )
                    eval_loss = self.evaluate(state , val_dataset )
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict ) )
                    self.logger.log(logging_dict , commit=True )
                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}" , state=state )
    def evaluate( self , state , dataset ):
        dataloader = get_batched_dataset(dataset , self.args.batch_size )
        total = len(dataset ) // self.args.batch_size
        running_loss = jnp.array(0 , dtype=jnp.float32 )
        i = 0
        for batch in tqdm(dataloader , total=total , desc="Evaluating ... " ):
            batch = self.data_collator(batch )
            metrics = self.val_step_fn(state , **batch )
            running_loss += jax_utils.unreplicate(metrics["loss"] )
            i += 1
        return running_loss / i
    def save_checkpoint( self , save_dir , state ):
        state = jax_utils.unreplicate(state )
        print(f"SAVING CHECKPOINT IN {save_dir}" , end=" ... " )
        self.model_save_fn(save_dir , params=state.params )
        with open(os.path.join(save_dir , "opt_state.msgpack" ) , "wb" ) as f:
            f.write(to_bytes(state.opt_state ) )
        joblib.dump(self.args , os.path.join(save_dir , "args.joblib" ) )
        joblib.dump(self.data_collator , os.path.join(save_dir , "data_collator.joblib" ) )
        with open(os.path.join(save_dir , "training_state.json" ) , "w" ) as f:
            json.dump({"step": state.step.item()} , f )
        print("DONE" )
def restore_checkpoint( save_dir , state ):
    print(f"RESTORING CHECKPOINT FROM {save_dir}" , end=" ... " )
    with open(os.path.join(save_dir , "flax_model.msgpack" ) , "rb" ) as f:
        params = from_bytes(state.params , f.read() )
    with open(os.path.join(save_dir , "opt_state.msgpack" ) , "rb" ) as f:
        opt_state = from_bytes(state.opt_state , f.read() )
    args = joblib.load(os.path.join(save_dir , "args.joblib" ) )
    data_collator = joblib.load(os.path.join(save_dir , "data_collator.joblib" ) )
    with open(os.path.join(save_dir , "training_state.json" ) , "r" ) as f:
        training_state = json.load(f )
    step = training_state["step"]
    print("DONE" )
    return params, opt_state, step, args, data_collator
def scheduler_fn( lr , init_lr , warmup_steps , num_train_steps ):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr , end_value=lr , transition_steps=warmup_steps )
    decay_fn = optax.linear_schedule(init_value=lr , end_value=1E-7 , transition_steps=decay_steps )
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
    return lr
def build_tx( lr , init_lr , warmup_steps , num_train_steps , weight_decay ):
    def weight_decay_mask(params ):
        params = traverse_util.flatten_dict(params )
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask )
    lr = scheduler_fn(lr , init_lr , warmup_steps , num_train_steps )
    tx = optax.adamw(learning_rate=lr , weight_decay=weight_decay , mask=weight_decay_mask )
    return tx, lr
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm( number: int ) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative" )
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator( number: int ) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative" )
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark() -> None:
    def do_benchmark(number: int ) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:" )
        print(f"{get_set_bits_count_using_modulo_operator(number) = }" )
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=setup )
        print(f"timeit() runs in {timing} seconds" )
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }" )
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=setup , )
        print(f"timeit() runs in {timing} seconds" )
    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
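    # Cross-check (hedged): both counters agree with Python's bin(n).count("1"):
    for n in (0, 25, 37, 58):
        assert get_set_bits_count_using_brian_kernighans_algorithm(n) == bin(n).count("1")
        assert get_set_bits_count_using_modulo_operator(n) == bin(n).count("1")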
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_snake_case : List[str] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( BaseImageProcessor ):
    model_input_names = ["pixel_values"]
    def __init__( self, do_resize = True, size = None, resample = PIL.Image.BICUBIC, do_center_crop = True, crop_size = None, rescale_factor = 1 / 255, do_rescale = True, do_normalize = True, image_mean = None, image_std = None, **kwargs, ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self, image, size, resample = PIL.Image.BICUBIC, data_format = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}" )
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs )
    def center_crop( self, image, size, data_format = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}" )
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs )
    def rescale( self, image, scale, data_format = None, **kwargs, ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs )
    def normalize( self, image, mean, std, data_format = None, **kwargs, ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs )
    def preprocess( self, images, do_resize = None, size = None, resample = None, do_center_crop = None, crop_size = None, do_rescale = None, rescale_factor = None, do_normalize = None, image_mean = None, image_std = None, return_tensors = None, data_format = ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size" )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std ) for image in images]
        images = [to_channel_dimension_format(image, data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors )
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict ) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    lists_lengths = {key: len(value ) for key, value in gen_kwargs.items() if isinstance(value , list )}
    if len(set(lists_lengths.values() ) ) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            ) )
    max_length = max(lists_lengths.values() , default=0 )
    return max(1 , max_length )
def _distribute_shards(num_shards: int , max_num_jobs: int ) -> List[range]:
    """Distribute num_shards contiguously among max_num_jobs groups."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs ):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start , start + num_shards_to_add )
        shards_indices_per_group.append(shard_indices )
    return shards_indices_per_group
def _split_gen_kwargs(gen_kwargs: dict , max_num_jobs: int ) -> List[dict]:
    """Split gen_kwargs into max_num_jobs dicts, sharding every list value."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs )
    if num_shards == 1:
        return [dict(gen_kwargs )]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards , max_num_jobs=max_num_jobs )
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value , list )
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group ) )
        ]
def _merge_gen_kwargs(gen_kwargs_list: List[dict] ) -> dict:
    """Inverse of _split_gen_kwargs: concatenate the list values back together."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key] , list )
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs(rng: np.random.Generator , gen_kwargs: dict ) -> dict:
    """Shuffle all list values of gen_kwargs, using one permutation per list size."""
    list_sizes = {len(value ) for value in gen_kwargs.values() if isinstance(value , list )}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size ) )
        rng.shuffle(indices_per_size[size] )
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs )
    for key, value in shuffled_kwargs.items():
        if isinstance(value , list ):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value )]]
    return shuffled_kwargs
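# Hedged worked example of _distribute_shards: 10 shards over 3 jobs gives
# contiguous groups of sizes 4, 3, 3 (the first num_shards % max_num_jobs groups
# receive one extra shard):
if __name__ == "__main__":
    print(_distribute_shards(num_shards=10, max_num_jobs=3))
    # [range(0, 4), range(4, 7), range(7, 10)]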
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest( unittest.TestCase ):
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs ):
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = CLIPSegProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCamelCase )
__SCREAMING_SNAKE_CASE = CLIPSegProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __UpperCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __UpperCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __UpperCamelCase )
self.assertIsInstance(processor_fast.image_processor , __UpperCamelCase )
def _a ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=__UpperCamelCase , padding_value=1.0 )
__SCREAMING_SNAKE_CASE = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCamelCase )
def _a ( self : str ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = CLIPSegProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = image_processor(__UpperCamelCase , return_tensors='''np''' )
__SCREAMING_SNAKE_CASE = processor(images=__UpperCamelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self : List[str] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = CLIPSegProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__SCREAMING_SNAKE_CASE = '''lower newer'''
__SCREAMING_SNAKE_CASE = processor(text=__UpperCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer(__UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self : Optional[int] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = CLIPSegProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__SCREAMING_SNAKE_CASE = '''lower newer'''
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = processor(text=__UpperCamelCase , images=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__UpperCamelCase ):
processor()
def _a ( self : Any ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = CLIPSegProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = processor(images=__UpperCamelCase , visual_prompt=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__UpperCamelCase ):
processor()
def _a ( self : Any ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = CLIPSegProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__SCREAMING_SNAKE_CASE = processor.batch_decode(__UpperCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
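# The assertions above cover the processor round trip end to end. As a rough
# sketch of the same API outside the test harness (the checkpoint id is
# illustrative, not taken from this file):
#
#     from transformers import CLIPSegProcessor
#     processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#     inputs = processor(text=["a cat"], images=[pil_image], return_tensors="pt")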
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_DESCRIPTION = "\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
_KWARGS_DESCRIPTION = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n    sources: list of source sentences where each sentence should be a string.\n    predictions: list of predicted sentences where each sentence should be a string.\n    references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n    sari: sari score\n    sacrebleu: sacrebleu score\n    exact: exact score\n\nExamples:\n    >>> sources=[\"About 95 species are currently accepted .\"]\n    >>> predictions=[\"About 95 you now get in .\"]\n    >>> references=[[\"About 95 species are currently known .\"]]\n    >>> wiki_split = datasets.load_metric(\"wiki_split\")\n    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n    >>> print(results)\n    {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer(text):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(text))))


def compute_exact(a_gold, a_pred):
    """Exact match between gold and prediction after normalization."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    """Percentage of predictions matching at least one reference exactly."""
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100


def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgrams) - set(sgrams)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgrams)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)


def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore


def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset to allow using sacrebleu.
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent


def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score


def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
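# A minimal usage sketch for this metric, mirroring the example embedded in
# _KWARGS_DESCRIPTION above (the printed scores there come from this exact call):
#
#     import datasets
#     wiki_split = datasets.load_metric("wiki_split")
#     results = wiki_split.compute(
#         sources=["About 95 species are currently accepted ."],
#         predictions=["About 95 you now get in ."],
#         references=[["About 95 species are currently known ."]],
#     )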
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
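# With the lazy module registered, importing a symbol from this package defers
# the heavy framework imports until first use. A rough illustration (assumes
# the corresponding backend is installed):
#
#     from transformers.models.clip import CLIPConfig  # cheap: config module only
#     from transformers.models.clip import CLIPModel   # pulls in modeling_clip (and torch) on demand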
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """
    Returns the least value of n for which the fill-count function first
    exceeds one million, for rows that only admit red blocks of at least
    min_block_length squares (Project Euler 115).
    """
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n
if __name__ == "__main__":
print(f"""{solution() = }""")
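# Sanity checks from the Project Euler 115 statement (shown here as a sketch,
# not part of the original script): the fill-count first exceeds one million at
# n = 30 for m = 3 and at n = 57 for m = 10, so:
#
#     solution(3)   # 30
#     solution(10)  # 57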
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    """Hash table that resolves collisions by chaining values in deques."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
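# Rough usage sketch. The constructor signature comes from the HashTable base
# class in .hash_table, which is assumed here to take the table size first:
#
#     ht = HashTableWithLinkedList(5, charge_factor=2)
#     ht.insert_data(17)
#     ht.insert_data(32)  # keys that collide chain into the same deque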
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Calculate the great-circle distance in metres between two points,
    given their latitudes and longitudes (WGS84 ellipsoid constants above,
    see https://en.wikipedia.org/wiki/World_Geodetic_System).
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
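# Quick sanity check (coordinates are illustrative): San Francisco
# (37.774856, -122.424227) to Yosemite (37.864742, -119.537521) should come out
# at roughly 254 km.
#
#     print(haversine_distance(37.774856, -122.424227, 37.864742, -119.537521))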
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    iam_client = boto3.client("iam")

    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"sagemaker:*",
"ecr:GetDownloadUrlForLayer",
"ecr:BatchGetImage",
"ecr:BatchCheckLayerAvailability",
"ecr:GetAuthorizationToken",
"cloudwatch:PutMetricData",
"cloudwatch:GetMetricData",
"cloudwatch:GetMetricStatistics",
"cloudwatch:ListMetrics",
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:DescribeLogStreams",
"logs:PutLogEvents",
"logs:GetLogEvents",
"s3:CreateBucket",
"s3:ListBucket",
"s3:GetBucketLocation",
"s3:GetObject",
"s3:PutObject",
],
"Resource": "*",
}
],
}
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")


def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )

        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )

    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
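# `accelerate config` consumes the returned SageMakerConfig and persists it; a
# sketch of the downstream call (names outside this file are assumed):
#
#     config = get_sagemaker_input()
#     config.to_json_file(default_config_file)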
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Minimum number of moves to give every node exactly one coin."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves + right_distrib_moves + abs(coins_to_left) + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
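# Worked example: a three-node tree with all coins at the root needs two moves,
# one coin to each empty child.
#
#     root = TreeNode(3, TreeNode(0), TreeNode(0))
#     assert distribute_coins(root) == 2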
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Maximum sum of any k consecutive elements, via a sliding window."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
_UpperCamelCase = [randint(-1000, 1000) for i in range(100)]
_UpperCamelCase = randint(0, 110)
print(F'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
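# The sliding window updates the running sum in O(1) per step, so the scan is
# O(n) rather than the O(n * k) of recomputing every window from scratch:
#
#     max_sum_in_array([1, 2, 3, 4, 5], 2)  # 9, from the window [4, 5]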
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as DeiT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds")
def snake_case__ ( self):
pass
def snake_case__ ( self):
snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : int = model_class(lowercase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
snake_case_ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear))
def snake_case__ ( self):
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : List[Any] = model_class(lowercase_)
snake_case_ : Optional[Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : List[str] = [*signature.parameters.keys()]
snake_case_ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowercase_)
def snake_case__ ( self):
snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def snake_case__ ( self):
snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowercase_)
def snake_case__ ( self):
snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_)
def snake_case__ ( self , lowercase_ , lowercase_ , lowercase_=False):
snake_case_ : List[Any] = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def snake_case__ ( self):
if not self.model_tester.is_training:
return
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : List[Any] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowercase_)
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
snake_case_ : Tuple = model_class(lowercase_)
model.to(lowercase_)
model.train()
snake_case_ : List[Any] = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
snake_case_ : Optional[int] = model(**lowercase_).loss
loss.backward()
def snake_case__ ( self):
snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
snake_case_ : List[Any] = False
snake_case_ : List[Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(lowercase_) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
snake_case_ : int = model_class(lowercase_)
model.gradient_checkpointing_enable()
model.to(lowercase_)
model.train()
snake_case_ : int = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
snake_case_ : Union[str, Any] = model(**lowercase_).loss
loss.backward()
def snake_case__ ( self):
snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Optional[int] = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowercase_),
*get_values(lowercase_),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}'):
snake_case_ : Optional[int] = problem_type["title"]
snake_case_ : List[Any] = problem_type["num_labels"]
snake_case_ : List[Any] = model_class(lowercase_)
model.to(lowercase_)
model.train()
snake_case_ : Dict = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
if problem_type["num_labels"] > 1:
snake_case_ : str = inputs["labels"].unsqueeze(1).repeat(1 , problem_type["num_labels"])
snake_case_ : List[str] = inputs["labels"].to(problem_type["dtype"])
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowercase_) as warning_list:
snake_case_ : Union[str, Any] = model(**lowercase_).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message):
raise ValueError(
F'Something is going wrong in the regression problem: intercepted {w.message}')
loss.backward()
@slow
def snake_case__ ( self):
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : Optional[int] = DeiTModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
def UpperCamelCase_ ( ):
"""simple docstring"""
snake_case_ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
@cached_property
def snake_case__ ( self):
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
if is_vision_available()
else None
)
@slow
def snake_case__ ( self):
snake_case_ : int = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
lowercase_)
snake_case_ : Dict = self.default_image_processor
snake_case_ : Optional[Any] = prepare_img()
snake_case_ : Optional[Any] = image_processor(images=lowercase_ , return_tensors="pt").to(lowercase_)
# forward pass
with torch.no_grad():
snake_case_ : Any = model(**lowercase_)
# verify the logits
snake_case_ : int = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , lowercase_)
snake_case_ : Optional[int] = torch.tensor([-1.0_266, 0.1_912, -1.2_861]).to(lowercase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4))
@slow
@require_accelerate
@require_torch_gpu
def snake_case__ ( self):
snake_case_ : Any = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto")
snake_case_ : List[str] = self.default_image_processor
snake_case_ : Dict = prepare_img()
snake_case_ : List[Any] = image_processor(images=lowercase_ , return_tensors="pt")
snake_case_ : List[Any] = inputs.pixel_values.to(lowercase_)
# forward pass to make sure inference works in fp16
with torch.no_grad():
snake_case_ : Optional[Any] = model(lowercase_)
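# These tests are normally collected by the repository test runner, e.g. (path
# assumed to match the transformers layout):
#
#     pytest tests/models/deit/test_modeling_deit.py -k "image_classification"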
'''simple docstring'''
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """
    Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)
    """
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """
    Req = R1 + R2 + ... + Rn
    """
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
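# Hand-verifiable check with two 10 ohm resistors:
#
#     resistor_parallel([10, 10])  # 5.0
#     resistor_series([10, 10])    # 20.0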
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class PixaStructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class PixaStructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self: Any ) -> Tuple:
snake_case__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_convert_rgb' ) )
def lowerCAmelCase_ ( self: List[Any] ) -> str:
snake_case__ = self.image_processor_tester.prepare_dummy_image()
snake_case__ = self.image_processing_class(**self.image_processor_dict )
snake_case__ = 20_48
snake_case__ = image_processor(UpperCamelCase , return_tensors='pt' , max_patches=UpperCamelCase )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0_606 ) , atol=1e-3 , rtol=1e-3 ) )
def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[Any]:
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
snake_case__ = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
snake_case__ = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
snake_case__ = image_processor(
UpperCamelCase , return_tensors='pt' , max_patches=UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowerCAmelCase_ ( self: Any ) -> Union[str, Any]:
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
snake_case__ = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
snake_case__ = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(UpperCamelCase ):
snake_case__ = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=UpperCamelCase ).flattened_patches
snake_case__ = 'Hello'
snake_case__ = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=UpperCamelCase , header_text=UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
snake_case__ = image_processor(
UpperCamelCase , return_tensors='pt' , max_patches=UpperCamelCase , header_text=UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[int]:
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
snake_case__ = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
snake_case__ = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
snake_case__ = image_processor(
UpperCamelCase , return_tensors='pt' , max_patches=UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowerCAmelCase_ ( self: int ) -> str:
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
snake_case__ = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
snake_case__ = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
snake_case__ = image_processor(
UpperCamelCase , return_tensors='pt' , max_patches=UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class PixaStructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self: Dict ) -> Dict:
snake_case__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_convert_rgb' ) )
def lowerCAmelCase_ ( self: int ) -> Any:
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
snake_case__ = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
snake_case__ = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
snake_case__ = image_processor(
UpperCamelCase , return_tensors='pt' , max_patches=UpperCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 328 |
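For reference, the `expected_hidden_dim` arithmetic that the tests above recompute inline reduces to a single formula: each flattened patch carries height * width * channels pixel values plus two extra slots for the patch's (row, column) coordinates. The sketch below is illustrative only; `expected_hidden_dim` is a hypothetical helper, not part of the image processor API.
# Illustrative sketch only -- `expected_hidden_dim` is a hypothetical helper
# that restates the arithmetic used in the tests above.
def expected_hidden_dim(patch_height: int, patch_width: int, num_channels: int) -> int:
    # height * width * channels pixel values, plus 2 slots for (row, col) indices
    return patch_height * patch_width * num_channels + 2
assert expected_hidden_dim(16, 16, 3) == 770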
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
a_ : Optional[Any] = logging.get_logger("""transformers.models.encodec""")
a_ : List[str] = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
a_ : Optional[int] = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
a_ : Tuple = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
a_ : Union[str, Any] = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
a_ : Union[str, Any] = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
a_ : Optional[Any] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
a_ : List[str] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
a_ : Any = []
a_ : str = []
def __snake_case ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple ):
for attribute in key.split("." ):
lowerCamelCase_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
if weight_type is not None:
lowerCamelCase_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ).shape
else:
lowerCamelCase_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowerCamelCase_ = value
elif weight_type == "weight_g":
lowerCamelCase_ = value
elif weight_type == "weight_v":
lowerCamelCase_ = value
elif weight_type == "bias":
lowerCamelCase_ = value
elif weight_type == "running_mean":
lowerCamelCase_ = value
elif weight_type == "running_var":
lowerCamelCase_ = value
elif weight_type == "num_batches_tracked":
lowerCamelCase_ = value
elif weight_type == "weight_ih_l0":
lowerCamelCase_ = value
elif weight_type == "weight_hh_l0":
lowerCamelCase_ = value
elif weight_type == "bias_ih_l0":
lowerCamelCase_ = value
elif weight_type == "bias_hh_l0":
lowerCamelCase_ = value
elif weight_type == "weight_ih_l1":
lowerCamelCase_ = value
elif weight_type == "weight_hh_l1":
lowerCamelCase_ = value
elif weight_type == "bias_ih_l1":
lowerCamelCase_ = value
elif weight_type == "bias_hh_l1":
lowerCamelCase_ = value
else:
lowerCamelCase_ = value
logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' )
def __snake_case ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] ):
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
lowerCamelCase_ ,lowerCamelCase_ = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def __snake_case ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple ):
lowerCamelCase_ = []
    if model_name in ["encodec_24khz", "encodec_32khz"]:
lowerCamelCase_ = MAPPING_24K
elif model_name == "encodec_48khz":
lowerCamelCase_ = MAPPING_48K
else:
raise ValueError(F'''Unsupported model: {model_name}''' )
for name, value in orig_dict.items():
if should_ignore(UpperCAmelCase_ , UpperCAmelCase_ ):
logger.info(F'''{name} was ignored''' )
continue
lowerCamelCase_ = False
for key, mapped_key in MAPPING.items():
if "*" in key:
lowerCamelCase_ ,lowerCamelCase_ = key.split(".*." )
if prefix in name and suffix in name:
lowerCamelCase_ = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("embed" ) and name.endswith("embed_avg" ):
continue
lowerCamelCase_ = True
if "*" in mapped_key:
lowerCamelCase_ = name.split(UpperCAmelCase_ )[0].split("." )[-2]
lowerCamelCase_ = mapped_key.replace("*" , UpperCAmelCase_ )
if "weight_g" in name:
lowerCamelCase_ = "weight_g"
elif "weight_v" in name:
lowerCamelCase_ = "weight_v"
elif "weight_ih_l0" in name:
lowerCamelCase_ = "weight_ih_l0"
elif "weight_hh_l0" in name:
lowerCamelCase_ = "weight_hh_l0"
elif "bias_ih_l0" in name:
lowerCamelCase_ = "bias_ih_l0"
elif "bias_hh_l0" in name:
lowerCamelCase_ = "bias_hh_l0"
elif "weight_ih_l1" in name:
lowerCamelCase_ = "weight_ih_l1"
elif "weight_hh_l1" in name:
lowerCamelCase_ = "weight_hh_l1"
elif "bias_ih_l1" in name:
lowerCamelCase_ = "bias_ih_l1"
elif "bias_hh_l1" in name:
lowerCamelCase_ = "bias_hh_l1"
elif "bias" in name:
lowerCamelCase_ = "bias"
elif "weight" in name:
lowerCamelCase_ = "weight"
elif "running_mean" in name:
lowerCamelCase_ = "running_mean"
elif "running_var" in name:
lowerCamelCase_ = "running_var"
elif "num_batches_tracked" in name:
lowerCamelCase_ = "num_batches_tracked"
else:
lowerCamelCase_ = None
set_recursively(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
continue
if not is_used:
unused_weights.append(UpperCAmelCase_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def __snake_case ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[int]=None , ):
if config_path is not None:
lowerCamelCase_ = EncodecConfig.from_pretrained(UpperCAmelCase_ )
else:
lowerCamelCase_ = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
lowerCamelCase_ = [8, 5, 4, 4]
lowerCamelCase_ = [2.2]
lowerCamelCase_ = 64
lowerCamelCase_ = 32000
lowerCamelCase_ = 2048
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
elif model_name == "encodec_48khz":
lowerCamelCase_ = [8, 5, 4, 2]
lowerCamelCase_ = [3.0, 6.0, 12.0, 24.0]
lowerCamelCase_ = 48000
lowerCamelCase_ = 2
lowerCamelCase_ = False
lowerCamelCase_ = "time_group_norm"
lowerCamelCase_ = True
lowerCamelCase_ = 1.0
lowerCamelCase_ = 0.01
else:
raise ValueError(F'''Unknown model name: {model_name}''' )
lowerCamelCase_ = EncodecModel(UpperCAmelCase_ )
lowerCamelCase_ = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(UpperCAmelCase_ )
lowerCamelCase_ = torch.load(UpperCAmelCase_ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
lowerCamelCase_ = original_checkpoint["best_state"]
recursively_load_weights(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
model.save_pretrained(UpperCAmelCase_ )
if repo_id:
print("Pushing to the hub..." )
feature_extractor.push_to_hub(UpperCAmelCase_ )
model.push_to_hub(UpperCAmelCase_ )
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
a_ : str = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 675 | 0 |
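The conversion script above leans on `.*.` wildcard keys both to ignore weights and to map them to new names. Below is a minimal restatement of that matching rule, written as a standalone function; it is not the transformers implementation, just the same logic `should_ignore` applies.
# Minimal sketch of the ".*." wildcard rule used by should_ignore above
# (illustrative restatement, not the transformers implementation).
def matches_wildcard(key: str, name: str) -> bool:
    if key.endswith(".*"):
        return name.startswith(key[:-1])
    if ".*." in key:
        prefix, suffix = key.split(".*.")
        return prefix in name and suffix in name
    return key in name
assert matches_wildcard("quantizer.vq.layers.*._codebook.embed",
                        "quantizer.vq.layers.0._codebook.embed")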
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
a_ = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
a_ = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
a_ = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
def __magic_name__ ( self : Optional[int] ) -> List[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
def __magic_name__ ( self : int , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : int =0.0
for i, j in zip(__lowercase , __lowercase ):
n_correct += 1.0 if math_equivalence.is_equiv(__lowercase , __lowercase ) else 0.0
SCREAMING_SNAKE_CASE__ : str =n_correct / len(__lowercase )
return {
"accuracy": accuracy,
}
| 665 |
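A hedged usage sketch for the metric above, taken from its own `_KWARGS_DESCRIPTION` example; it assumes a `datasets` version that still ships `load_metric` and that the `math_equivalence` dependency noted in the import comment is installed.
# Usage sketch based on the metric's own docstring example; assumes
# `datasets.load_metric` is still available and math_equivalence is installed.
import datasets
metric = datasets.load_metric("competition_math")
results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
print(results)  # {'accuracy': 1.0} -- "1/2" canonicalizes to "\frac{1}{2}"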
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 1 |
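The module above defers every heavy import through `_LazyModule`, keyed by the `_import_structure` dict. Below is a simplified stand-in for that pattern; it is not the actual transformers `_LazyModule`, only the core idea of resolving submodules on first attribute access.
# Simplified stand-in for the lazy-import pattern above; not the actual
# transformers._LazyModule, just the core idea.
import importlib
import types
class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map every exported name back to the submodule that defines it
        self._class_to_module = {obj: mod for mod, objs in import_structure.items() for obj in objs}
    def __getattr__(self, attr: str):
        # import the submodule only on first attribute access
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)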
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ) -> Tuple:
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
_UpperCAmelCase = flax_key_tuple[:-1] + ("""weight""",)
_UpperCAmelCase = torch.permute(__snake_case , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__snake_case ):
# linear layer
_UpperCAmelCase = flax_key_tuple[:-1] + ("""weight""",)
_UpperCAmelCase = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_UpperCAmelCase = flax_key_tuple[:-1] + ("""weight""",)
return flax_key_tuple, flax_tensor
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case ) -> List[str]:
if "metadata" in layer:
_UpperCAmelCase = layer.split("""metadata""" )
_UpperCAmelCase = """""".join(split_layer[0] )[:-1]
_UpperCAmelCase = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
elif "kvstore" in layer:
_UpperCAmelCase = layer.split("""kvstore""" )
_UpperCAmelCase = """""".join(split_layer[0] )[:-1]
_UpperCAmelCase = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
else:
_UpperCAmelCase = layer.split("""/""" )
_UpperCAmelCase = """/""".join(split_layer[:-1] )
_UpperCAmelCase = (split_layer[-1],)
if "kvstore/path" in layer:
_UpperCAmelCase = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
elif "kvstore/driver" in layer:
_UpperCAmelCase = """file"""
else:
_UpperCAmelCase = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ) -> Optional[Any]:
_UpperCAmelCase = rename_keys(__snake_case )
_UpperCAmelCase = {}
for k, v in current_block.items():
_UpperCAmelCase = v
_UpperCAmelCase = new_current_block
torch.save(__snake_case , __snake_case )
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = WEIGHTS_NAME ) -> Any:
_UpperCAmelCase = convert_file_size_to_int(__snake_case )
_UpperCAmelCase = []
_UpperCAmelCase = {}
_UpperCAmelCase = 0
_UpperCAmelCase = 0
os.makedirs(__snake_case , exist_ok=__snake_case )
with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
_UpperCAmelCase = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
_UpperCAmelCase = flatten_dict(__snake_case , sep="""/""" )
_UpperCAmelCase = {}
for layer in checkpoint_info.keys():
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = get_key_and_tensorstore_dict(
__snake_case , __snake_case , __snake_case )
if curr_real_layer_name in all_layers:
_UpperCAmelCase = content
else:
_UpperCAmelCase = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_UpperCAmelCase = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_UpperCAmelCase = torch.tensor(__snake_case )
_UpperCAmelCase = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_UpperCAmelCase , _UpperCAmelCase = rename_base_flax_keys(tuple(key.split("""/""" ) ) , __snake_case )
_UpperCAmelCase = """/""".join(__snake_case )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
_UpperCAmelCase = os.path.join(
__snake_case , weights_name.replace(""".bin""" , f"""-{len(__snake_case )+1:05d}-of-???.bin""" ) )
rename_and_save_block(__snake_case , __snake_case )
sharded_state_dicts.append(current_block.keys() )
del current_block
_UpperCAmelCase = {}
_UpperCAmelCase = 0
_UpperCAmelCase = raw_weights.to(getattr(__snake_case , __snake_case ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_UpperCAmelCase = os.path.join(__snake_case , weights_name.replace(""".bin""" , f"""-{len(__snake_case )+1:05d}-of-???.bin""" ) )
rename_and_save_block(__snake_case , __snake_case )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(__snake_case ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_UpperCAmelCase = {}
_UpperCAmelCase = {}
for idx, shard in enumerate(__snake_case ):
_UpperCAmelCase = weights_name.replace(
""".bin""" , f"""-{idx+1:05d}-of-{len(__snake_case ):05d}.bin""" ) # len(sharded_state_dicts):05d}
_UpperCAmelCase = os.path.join(__snake_case , weights_name.replace(""".bin""" , f"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(__snake_case , os.path.join(__snake_case , __snake_case ) )
_UpperCAmelCase = shard
for key in shard:
_UpperCAmelCase = shard_file
# Add the metadata
_UpperCAmelCase = {"""total_size""": total_size}
_UpperCAmelCase = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(__snake_case , __snake_case ) , """w""" , encoding="""utf-8""" ) as f:
_UpperCAmelCase = json.dumps(__snake_case , indent=2 , sort_keys=__snake_case ) + """\n"""
f.write(__snake_case )
return metadata, index
if __name__ == "__main__":
__a: Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''')
parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
__a: int = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def _SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
_UpperCAmelCase = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
_UpperCAmelCase = SwitchTransformersForConditionalGeneration.from_pretrained(
"""/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
_UpperCAmelCase = TaTokenizer.from_pretrained("""t5-small""" )
_UpperCAmelCase = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
_UpperCAmelCase = tokenizer(__snake_case , return_tensors="""pt""" ).input_ids
_UpperCAmelCase = model.generate(__snake_case , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 108 |
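The heart of `shard_on_the_fly` above is the size bookkeeping: each tensor's byte size (numel times dtype byte size) is added to the current shard until the next tensor would push it past `max_shard_size`, at which point the shard is closed and a new one starts. A minimal sketch of just that accounting; `plan_shards` is a hypothetical helper, not part of the script.
# Minimal sketch of the shard-size accounting in shard_on_the_fly above;
# `plan_shards` is a hypothetical helper, not part of the script.
def plan_shards(weight_sizes, max_shard_size):
    shards, current, current_size = [], [], 0
    for name, size in weight_sizes:
        if current and current_size + size > max_shard_size:
            shards.append(current)  # close the shard before it overflows
            current, current_size = [], 0
        current.append(name)
        current_size += size
    if current:
        shards.append(current)  # the last, possibly partial, shard
    return shards
assert plan_shards([("a", 6), ("b", 6), ("c", 3)], 10) == [["a"], ["b", "c"]]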
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
__magic_name__ = logging.get_logger(__name__)
def _lowerCamelCase ( ) -> Union[str, Any]:
'''simple docstring'''
a__ = os.getenv('SM_HP_MP_PARAMETERS','{}' )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
a__ = json.loads(UpperCAmelCase__ )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
a__ = os.getenv('SM_FRAMEWORK_PARAMS','{}' )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
a__ = json.loads(UpperCAmelCase__ )
if not mpi_options.get('sagemaker_mpi_enabled',UpperCAmelCase__ ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec('smdistributed' ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SCREAMING_SNAKE_CASE ( TrainingArguments ):
"""simple docstring"""
a_ : str =field(
default="" , metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"} , )
def _lowerCAmelCase ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
super().__post_init__()
warnings.warn(
'`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '
'`TrainingArguments` instead.' , _snake_case , )
@cached_property
def _lowerCAmelCase ( self : Optional[Any] ) -> "torch.device":
'''simple docstring'''
logger.info('PyTorch: setting up devices' )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
'torch.distributed process group is initialized, but local_rank == -1. '
'In order to use Torch DDP, launch your script with `python -m torch.distributed.launch' )
if self.no_cuda:
a__ = torch.device('cpu' )
a__ = 0
elif is_sagemaker_model_parallel_available():
a__ = smp.local_rank()
a__ = torch.device('cuda' , _snake_case )
a__ = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend='smddp' , timeout=self.ddp_timeout_delta )
a__ = int(os.getenv('SMDATAPARALLEL_LOCAL_RANK' ) )
a__ = torch.device('cuda' , self.local_rank )
a__ = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
a__ = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
a__ = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend='nccl' , timeout=self.ddp_timeout_delta )
a__ = torch.device('cuda' , self.local_rank )
a__ = 1
if device.type == "cuda":
torch.cuda.set_device(_snake_case )
return device
@property
def _lowerCAmelCase ( self : str ) -> Tuple:
'''simple docstring'''
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def _lowerCAmelCase ( self : Dict ) -> int:
'''simple docstring'''
return not is_sagemaker_model_parallel_available()
@property
def _lowerCAmelCase ( self : Any ) -> int:
'''simple docstring'''
return False
| 232 | 0 |
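Both SageMaker probes above follow the same shape: read a JSON blob from an environment variable and check one field. Below is a condensed sketch of that probing; the real helpers additionally validate field values and whether `smdistributed` is importable.
# Condensed sketch of the env-JSON probing above; the real helpers also check
# field values and whether smdistributed is importable.
import json
import os
def _env_json_has(var_name: str, field: str) -> bool:
    try:
        return field in json.loads(os.getenv(var_name, "{}"))
    except json.JSONDecodeError:
        return False
# model parallelism requires "partitions" inside SM_HP_MP_PARAMETERS
mp_ready = _env_json_has("SM_HP_MP_PARAMETERS", "partitions")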
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
A : Union[str, Any] = logging.get_logger(__name__)
class lowerCamelCase (SequenceFeatureExtractor ):
"""simple docstring"""
lowerCamelCase__ = ['''input_features''']
def __init__( self : Dict , __magic_name__ : str=80 , __magic_name__ : List[Any]=16_000 , __magic_name__ : Optional[int]=160 , __magic_name__ : List[Any]=30 , __magic_name__ : Dict=400 , __magic_name__ : Optional[Any]=0.0 , __magic_name__ : Optional[Any]=False , **__magic_name__ : Tuple , ) -> int:
super().__init__(
feature_size=lowercase__ , sampling_rate=lowercase__ , padding_value=lowercase__ , return_attention_mask=lowercase__ , **lowercase__ , )
SCREAMING_SNAKE_CASE_ = n_fft
SCREAMING_SNAKE_CASE_ = hop_length
SCREAMING_SNAKE_CASE_ = chunk_length
SCREAMING_SNAKE_CASE_ = chunk_length * sampling_rate
SCREAMING_SNAKE_CASE_ = self.n_samples // hop_length
SCREAMING_SNAKE_CASE_ = sampling_rate
SCREAMING_SNAKE_CASE_ = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowercase__ , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=lowercase__ , norm="slaney" , mel_scale="slaney" , )
def __A ( self : Optional[Any] , __magic_name__ : List[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = spectrogram(
lowercase__ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
SCREAMING_SNAKE_CASE_ = log_spec[:, :-1]
SCREAMING_SNAKE_CASE_ = np.maximum(lowercase__ , log_spec.max() - 8.0 )
SCREAMING_SNAKE_CASE_ = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def __A ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] = 0.0 ) -> List[Any]:
if attention_mask is not None:
SCREAMING_SNAKE_CASE_ = np.array(lowercase__ , np.intaa )
SCREAMING_SNAKE_CASE_ = []
for vector, length in zip(lowercase__ , attention_mask.sum(-1 ) ):
SCREAMING_SNAKE_CASE_ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
SCREAMING_SNAKE_CASE_ = padding_value
normed_input_values.append(lowercase__ )
else:
SCREAMING_SNAKE_CASE_ = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] = True , __magic_name__ : Any = None , __magic_name__ : List[str] = None , __magic_name__ : Optional[Any] = None , __magic_name__ : Any = "max_length" , __magic_name__ : List[str] = None , __magic_name__ : Optional[Any] = None , __magic_name__ : Optional[int] = None , **__magic_name__ : Tuple , ) -> Tuple:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
SCREAMING_SNAKE_CASE_ = isinstance(lowercase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
SCREAMING_SNAKE_CASE_ = is_batched_numpy or (
isinstance(lowercase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE_ = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowercase__ , np.ndarray ):
SCREAMING_SNAKE_CASE_ = np.asarray(lowercase__ , dtype=np.floataa )
elif isinstance(lowercase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE_ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE_ = [np.asarray([raw_speech] ).T]
SCREAMING_SNAKE_CASE_ = BatchFeature({"input_features": raw_speech} )
# convert into correct format for padding
SCREAMING_SNAKE_CASE_ = self.pad(
lowercase__ , padding=lowercase__ , max_length=max_length if max_length else self.n_samples , truncation=lowercase__ , pad_to_multiple_of=lowercase__ , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
SCREAMING_SNAKE_CASE_ = self.zero_mean_unit_var_norm(
padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
SCREAMING_SNAKE_CASE_ = np.stack(padded_inputs["input_features"] , axis=0 )
# make sure list is in array format
SCREAMING_SNAKE_CASE_ = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
SCREAMING_SNAKE_CASE_ = [self._np_extract_fbank_features(lowercase__ ) for waveform in input_features[0]]
if isinstance(input_features[0] , lowercase__ ):
SCREAMING_SNAKE_CASE_ = [np.asarray(lowercase__ , dtype=np.floataa ) for feature in input_features]
else:
SCREAMING_SNAKE_CASE_ = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
SCREAMING_SNAKE_CASE_ = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
SCREAMING_SNAKE_CASE_ = padded_inputs.convert_to_tensors(lowercase__ )
return padded_inputs
def __A ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE_ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 702 |
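The log-mel post-processing in `_np_extract_fbank_features` above amounts to three steps: log10, clamping to within 8 of the per-clip maximum, and rescaling toward [-1, 1]. A standalone restatement follows; the 1e-10 floor is an added assumption for numerical safety, since in the original the log is taken inside the `spectrogram()` call.
# Standalone restatement of the log-mel normalization above. The 1e-10 floor
# is an added assumption; the original computes log10 inside spectrogram().
import numpy as np
def normalize_log_mel(mel_power: np.ndarray) -> np.ndarray:
    log_spec = np.log10(np.maximum(mel_power, 1e-10))
    log_spec = np.maximum(log_spec, log_spec.max() - 8.0)  # clamp dynamic range
    return (log_spec + 4.0) / 4.0                          # rescale toward [-1, 1]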
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
A : str = logging.getLogger(__name__)
@dataclass
class lowerCamelCase (TrainingArguments ):
"""simple docstring"""
lowerCamelCase__ = field(
default=0.0 , metadata={'''help''': '''The label smoothing epsilon to apply (if not zero).'''} )
lowerCamelCase__ = field(default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Whether to SortishSamler or not.'''} )
lowerCamelCase__ = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
lowerCamelCase__ = field(default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''whether to use adafactor'''} )
lowerCamelCase__ = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Encoder layer dropout probability. Goes into model.config.'''} )
lowerCamelCase__ = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Decoder layer dropout probability. Goes into model.config.'''} )
lowerCamelCase__ = field(default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Dropout probability. Goes into model.config.'''} )
lowerCamelCase__ = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Attention dropout probability. Goes into model.config.'''} )
lowerCamelCase__ = field(
default='''linear''' , metadata={'''help''': f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} , )
| 356 | 0 |
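Dataclasses like the one above are normally consumed through `HfArgumentParser`, which turns each `field(...)` into a CLI flag. In the sketch below the field and flag names are assumptions inferred from the metadata help strings, since the snippet elides the real field names.
# Hedged sketch: field/flag names are assumptions inferred from the metadata
# help strings above; the snippet elides the real field names.
from dataclasses import dataclass, field
from transformers import HfArgumentParser
@dataclass
class MiniArgs:
    label_smoothing: float = field(default=0.0, metadata={"help": "Label smoothing epsilon."})
    sortish_sampler: bool = field(default=False, metadata={"help": "Use SortishSampler."})
(mini_args,) = HfArgumentParser(MiniArgs).parse_args_into_dataclasses(args=["--label_smoothing", "0.1", "--sortish_sampler"])
assert mini_args.label_smoothing == 0.1 and mini_args.sortish_sampler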
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class a ( BaseOutput ):
"""simple docstring"""
A__ : torch.FloatTensor
class a ( ModelMixin, ConfigMixin ):
"""simple docstring"""
@register_to_config
def __init__( self , snake_case_ = 3 , snake_case_ = 3 , snake_case_ = ("DownEncoderBlock2D",) , snake_case_ = ("UpDecoderBlock2D",) , snake_case_ = (64,) , snake_case_ = 1 , snake_case_ = "silu" , snake_case_ = 3 , snake_case_ = 32 , snake_case_ = 256 , snake_case_ = 32 , snake_case_ = None , snake_case_ = 0.1_82_15 , snake_case_ = "group" , ) -> Optional[Any]:
super().__init__()
# pass init params to Encoder
_UpperCAmelCase = Encoder(
in_channels=snake_case_ , out_channels=snake_case_ , down_block_types=snake_case_ , block_out_channels=snake_case_ , layers_per_block=snake_case_ , act_fn=snake_case_ , norm_num_groups=snake_case_ , double_z=snake_case_ , )
_UpperCAmelCase = vq_embed_dim if vq_embed_dim is not None else latent_channels
_UpperCAmelCase = nn.Convad(snake_case_ , snake_case_ , 1 )
_UpperCAmelCase = VectorQuantizer(snake_case_ , snake_case_ , beta=0.25 , remap=snake_case_ , sane_index_shape=snake_case_ )
_UpperCAmelCase = nn.Convad(snake_case_ , snake_case_ , 1 )
# pass init params to Decoder
_UpperCAmelCase = Decoder(
in_channels=snake_case_ , out_channels=snake_case_ , up_block_types=snake_case_ , block_out_channels=snake_case_ , layers_per_block=snake_case_ , act_fn=snake_case_ , norm_num_groups=snake_case_ , norm_type=snake_case_ , )
@apply_forward_hook
def __A ( self , snake_case_ , snake_case_ = True ) -> VQEncoderOutput:
_UpperCAmelCase = self.encoder(snake_case_ )
_UpperCAmelCase = self.quant_conv(snake_case_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=snake_case_ )
@apply_forward_hook
def __A ( self , snake_case_ , snake_case_ = False , snake_case_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.quantize(snake_case_ )
else:
_UpperCAmelCase = h
_UpperCAmelCase = self.post_quant_conv(snake_case_ )
_UpperCAmelCase = self.decoder(snake_case_ , quant if self.config.norm_type == "spatial" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=snake_case_ )
def __A ( self , snake_case_ , snake_case_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
_UpperCAmelCase = sample
_UpperCAmelCase = self.encode(snake_case_ ).latents
_UpperCAmelCase = self.decode(snake_case_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=snake_case_ )
| 426 |
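Conceptually, the `VectorQuantizer` step inside `decode` above snaps each latent vector to its nearest codebook entry. Below is an illustrative re-implementation of just that lookup; the diffusers class additionally returns indices and a commitment loss and uses a straight-through gradient estimator.
# Illustrative nearest-codebook lookup only; the diffusers VectorQuantizer
# also returns indices, a commitment loss, and a straight-through gradient.
import torch
def quantize(latents: torch.Tensor, codebook: torch.Tensor) -> torch.Tensor:
    # latents: (n, d), codebook: (k, d)
    distances = torch.cdist(latents, codebook)  # (n, k) pairwise L2 distances
    indices = distances.argmin(dim=1)           # nearest code per latent
    return codebook[indices]                    # (n, d) quantized latents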
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def A__ ( A__ , A__ ) -> Optional[int]:
'''simple docstring'''
assert isinstance(A__ , A__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def A__ ( A__ , A__ , A__ ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase = JsonDatasetReader(A__ , cache_dir=A__ , keep_in_memory=A__ ).read()
_check_json_dataset(A__ , A__ )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def A__ ( A__ , A__ , A__ ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_UpperCAmelCase = features.copy() if features else default_expected_features
_UpperCAmelCase = (
Features({feature: Value(A__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase = JsonDatasetReader(A__ , features=A__ , cache_dir=A__ ).read()
_check_json_dataset(A__ , A__ )
@pytest.mark.parametrize(
"features" , [
None,
{"col_3": "float64", "col_1": "string", "col_2": "int64"},
] , )
def A__ ( A__ , A__ , A__ ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
_UpperCAmelCase = features.copy() if features else default_expected_features
_UpperCAmelCase = (
Features({feature: Value(A__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase = JsonDatasetReader(A__ , features=A__ , cache_dir=A__ ).read()
assert isinstance(A__ , A__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def A__ ( A__ , A__ ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
_UpperCAmelCase = features.copy()
_UpperCAmelCase = (
Features({feature: Value(A__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = JsonDatasetReader(A__ , features=A__ , cache_dir=A__ ).read()
assert isinstance(A__ , A__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def A__ ( A__ , A__ , A__ ) -> str:
'''simple docstring'''
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_UpperCAmelCase = JsonDatasetReader(A__ , cache_dir=A__ , split=A__ ).read()
_check_json_dataset(A__ , A__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def A__ ( A__ , A__ , A__ ) -> Dict:
'''simple docstring'''
if issubclass(A__ , A__ ):
_UpperCAmelCase = jsonl_path
elif issubclass(A__ , A__ ):
_UpperCAmelCase = [jsonl_path]
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_UpperCAmelCase = JsonDatasetReader(A__ , cache_dir=A__ ).read()
_check_json_dataset(A__ , A__ )
def A__ ( A__ , A__ , A__=("train",) ) -> List[str]:
'''simple docstring'''
assert isinstance(A__ , A__ )
for split in splits:
_UpperCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def A__ ( A__ , A__ , A__ ) -> Any:
'''simple docstring'''
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase = JsonDatasetReader({"train": jsonl_path} , cache_dir=A__ , keep_in_memory=A__ ).read()
_check_json_datasetdict(A__ , A__ )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def A__ ( A__ , A__ , A__ ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_UpperCAmelCase = features.copy() if features else default_expected_features
_UpperCAmelCase = (
Features({feature: Value(A__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase = JsonDatasetReader({"train": jsonl_path} , features=A__ , cache_dir=A__ ).read()
_check_json_datasetdict(A__ , A__ )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def A__ ( A__ , A__ , A__ ) -> Optional[Any]:
'''simple docstring'''
if split:
_UpperCAmelCase = {split: jsonl_path}
else:
_UpperCAmelCase = "train"
_UpperCAmelCase = {"train": jsonl_path, "test": jsonl_path}
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_UpperCAmelCase = JsonDatasetReader(A__ , cache_dir=A__ ).read()
_check_json_datasetdict(A__ , A__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def A__ ( A__ ) -> List[Any]:
'''simple docstring'''
return json.load(A__ )
def A__ ( A__ ) -> int:
'''simple docstring'''
return [json.loads(A__ ) for line in buffer]
class a :
"""simple docstring"""
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def __A ( self , snake_case_ , snake_case_ , snake_case_ ) -> Dict:
with io.BytesIO() as buffer:
JsonDatasetWriter(snake_case_ , snake_case_ , lines=snake_case_ ).write()
buffer.seek(0 )
_UpperCAmelCase = load_json_function(snake_case_ )
assert isinstance(snake_case_ , snake_case_ )
assert isinstance(exported_content[0] , snake_case_ )
assert len(snake_case_ ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def __A ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Any:
with io.BytesIO() as buffer:
JsonDatasetWriter(snake_case_ , snake_case_ , lines=snake_case_ , orient=snake_case_ ).write()
buffer.seek(0 )
_UpperCAmelCase = load_json(snake_case_ )
assert isinstance(snake_case_ , snake_case_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(snake_case_ , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(snake_case_ ) == 10
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def __A ( self , snake_case_ , snake_case_ , snake_case_ ) -> Union[str, Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(snake_case_ , snake_case_ , lines=snake_case_ , num_proc=2 ).write()
buffer.seek(0 )
_UpperCAmelCase = load_json_function(snake_case_ )
assert isinstance(snake_case_ , snake_case_ )
assert isinstance(exported_content[0] , snake_case_ )
assert len(snake_case_ ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def __A ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Dict:
with io.BytesIO() as buffer:
JsonDatasetWriter(snake_case_ , snake_case_ , lines=snake_case_ , orient=snake_case_ , num_proc=2 ).write()
buffer.seek(0 )
_UpperCAmelCase = load_json(snake_case_ )
assert isinstance(snake_case_ , snake_case_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(snake_case_ , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(snake_case_ ) == 10
def __A ( self , snake_case_ ) -> Any:
with pytest.raises(snake_case_ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(snake_case_ , snake_case_ , num_proc=0 )
@pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
def __A ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
_UpperCAmelCase = tmp_path_factory.mktemp("data" ) / F"""test.json.{extension}"""
_UpperCAmelCase = str(shared_datadir / F"""test_file.json.{extension}""" )
JsonDatasetWriter(snake_case_ , snake_case_ , compression=snake_case_ ).write()
with fsspec.open(snake_case_ , "rb" , compression="infer" ) as f:
_UpperCAmelCase = f.read()
with fsspec.open(snake_case_ , "rb" , compression="infer" ) as f:
_UpperCAmelCase = f.read()
assert exported_content == original_content
| 426 | 1 |
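A round-trip usage sketch mirroring the writer tests above: write a `Dataset` to an in-memory JSON-lines buffer, then parse each line back. It relies only on the `JsonDatasetWriter` calls the tests themselves exercise.
# Round-trip sketch using only the JsonDatasetWriter API exercised above.
import io
import json
from datasets import Dataset
from datasets.io.json import JsonDatasetWriter
ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
with io.BytesIO() as buffer:
    JsonDatasetWriter(ds, buffer, lines=True).write()
    buffer.seek(0)
    rows = [json.loads(line) for line in buffer]
assert rows[0] == {"col_1": "a", "col_2": 1}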
import sys
_lowerCamelCase : Optional[Any] = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = _lowerCamelCase) -> int:
    '''simple docstring'''
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(F'''{solution() = }''')
| 196 |
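The same 13-digit search can be written with an explicit window and `math.prod` (Python 3.8+); this is functionally equivalent to `solution` above and checks every window the loop version does.
# Functionally equivalent alternative using math.prod (Python 3.8+).
import math
def largest_13_digit_product(digits: str) -> int:
    return max(
        math.prod(int(d) for d in digits[i : i + 13])
        for i in range(len(digits) - 12)
    )
assert largest_13_digit_product(_lowerCamelCase) == solution()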
def is_ip_va_address_valid(ip_va_address: str) -> bool:
    '''simple docstring'''
    octets = [int(i) for i in ip_va_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 254 for octet in octets)
if __name__ == "__main__":
_lowerCamelCase : List[str] = input().strip()
_lowerCamelCase : int = "valid" if is_ip_va_address_valid(ip) else "invalid"
print(F'''{ip} is a {valid_or_invalid} IP v4 address.''')
| 196 | 1 |
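A few sanity checks for the validator above. Note that this implementation caps octets at 254, so addresses containing 255 are rejected under its rule.
# Sanity checks; this validator caps octets at 254, so 255 is rejected.
assert is_ip_va_address_valid("192.168.0.1")
assert not is_ip_va_address_valid("192.168.0.255")  # 255 > 254 under this rule
assert not is_ip_va_address_valid("1.2.3")          # too few octets
assert not is_ip_va_address_valid("1.2.3.4.5")      # too many octets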
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ : Dict ={
"configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
"tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : List[Any] =["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Tuple =[
"MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
"MvpForCausalLM",
"MvpForConditionalGeneration",
"MvpForQuestionAnswering",
"MvpForSequenceClassification",
"MvpModel",
"MvpPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : str =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 101 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__lowercase : str = "pt"
elif is_tf_available():
__lowercase : str = "tf"
else:
__lowercase : Dict = "jax"
class _A ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : List[str] = PerceiverTokenizer
UpperCamelCase_ : str = False
def lowercase ( self : Any ) -> str:
super().setUp()
__snake_case = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase ( self : Tuple ) -> Dict:
return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )
def lowercase ( self : str , **A_ : Any ) -> PerceiverTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **A_ )
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string and not just on single characters
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(string, str)
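# Byte-level sanity sketch (added; not part of the test file): Perceiver ids
# are utf-8 bytes offset by the 6 special tokens, wrapped in [CLS] (4) / [SEP] (5):
#
#     tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
#     tokenizer("hi").input_ids  ->  [4, 110, 111, 5]   # 4, ord("h") + 6, ord("i") + 6, 5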
| 564 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase_ = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["YolosFeatureExtractor"]
UpperCAmelCase_ = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 490 |
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
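# Worked example (added): with key "HELLO", remove_duplicates gives "HELO", so
# create_cipher_map starts the table with the keyword and then continues from
# "A", skipping letters already used as images:
#
#     create_cipher_map("HELLO")  ->  {"A": "H", "B": "E", "C": "L", "D": "O", "E": "A", "F": "B", ...}
#     encipher("AB", cipher_map)  ->  "HE";  decipher("HE", cipher_map)  ->  "AB"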
| 490 | 1 |
def gnome_sort(lst: list) -> list:
    if len(lst) <= 1:
        return lst

    i = 1

    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1

    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
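# Added note: gnome sort advances a single index and swaps out-of-order
# neighbours, stepping back after each swap, so it behaves like insertion sort
# with O(n^2) worst-case comparisons and O(n) on already-sorted input.
#
#     gnome_sort([3, 1, 2])  ->  [1, 2, 3]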
| 57 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that goes through."

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5, evaluation_strategy="steps"
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
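# Minimal usage sketch (added; mirrors the pattern exercised above): any
# TrainerCallback subclass passed via `callbacks=` receives
# (args, state, control, **kwargs) at each hook, e.g.:
#
#     class LossPrinterCallback(TrainerCallback):
#         def on_log(self, args, state, control, logs=None, **kwargs):
#             if logs is not None:
#                 print(f"step {state.global_step}: {logs}")
#
#     trainer = Trainer(model, training_args, callbacks=[LossPrinterCallback()])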
| 608 | 0 |
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Returns the count of all n-digit positive integers which are also an nth power."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
| 717 |
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)
    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)

        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_dir = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_dir)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)
    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)
    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")
    def run_common_tests(self):
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
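# Usage sketch (added): a model's configuration test typically wires this
# helper into its own unittest class, e.g.:
#
#     self.config_tester = ConfigTester(self, config_class=MyConfig, hidden_size=37)
#     self.config_tester.run_common_tests()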
| 260 | 0 |
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
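# Example usage (added):
#
#     is_arithmetic_series([2, 4, 6])  ->  True   (common difference 2)
#     is_arithmetic_series([2, 4, 7])  ->  False
#     arithmetic_mean([2, 4, 6])       ->  4.0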
| 197 |
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
| 197 | 1 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size: int = 13,
        image_size: int = 64,
        patch_size: int = 2,
        embed_dim: int = 3,
        num_channels: int = 3,
        is_training: bool = True,
        use_labels: bool = True,
        hidden_size: int = 128,
        hidden_sizes=[16, 32, 64, 128],
        num_hidden_layers: int = 7,
        num_attention_heads: int = 4,
        intermediate_size: int = 37,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        type_sequence_label_size: int = 10,
        initializer_range: float = 0.02,
        encoder_stride: int = 2,
        num_attention_outputs: int = 1,
        dim: int = 128,
        depths: List[int] = [2, 2, 2, 2],
        mlp_expansion_ratio: int = 2,
        resolution: int = 2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return EfficientFormerConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, resolution=self.resolution, depths=self.depths, hidden_sizes=self.hidden_sizes, dim=self.dim, mlp_expansion_ratio=self.mlp_expansion_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
def _A ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
def _A ( self ):
'''simple docstring'''
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]), [seq_length, self.model_tester.hidden_size],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)

                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet")
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)

        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )
    def test_compile_tf_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)

            self.assertTrue(outputs_dict is not None)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 720 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase : List[str] ={"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[int] =["""XGLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int =["""XGLMTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
"""XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XGLMForCausalLM""",
"""XGLMModel""",
"""XGLMPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
"""FlaxXGLMForCausalLM""",
"""FlaxXGLMModel""",
"""FlaxXGLMPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
"""TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXGLMForCausalLM""",
"""TFXGLMModel""",
"""TFXGLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 412 | 0 |
from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    # Disjoint Set Node to store the parent and rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    # Disjoint Set DataStructure
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # Kruskal's Algorithm to generate a Minimum Spanning Tree (MST) of a graph
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
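if __name__ == "__main__":
    # Example (added): build a small weighted triangle and extract its MST;
    # Kruskal keeps the two cheap edges and drops the heavy (1, 3) edge.
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 1)
    g.add_edge(2, 3, 2)
    g.add_edge(1, 3, 10)
    print(g.kruskal().connections)  # {1: {2: 1}, 2: {1: 1, 3: 2}, 3: {2: 2}}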
| 569 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 421 | 0 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, field: Optional[str] = None, num_proc: Optional[int] = None, **kwargs):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs,
        )
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class JsonDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_json_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs
    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written
def __snake_case ( self : Optional[Any] , lowerCamelCase : str ) -> List[str]:
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Dict = args
__snake_case : List[str] = query_table(
table=self.dataset.data , key=slice(lowerCamelCase , offset + self.batch_size ) , indices=self.dataset._indices , )
__snake_case : str = batch.to_pandas().to_json(
path_or_buf=lowerCamelCase , orient=lowerCamelCase , lines=lowerCamelCase , index=lowerCamelCase , **lowerCamelCase )
if not json_str.endswith("\n" ):
json_str += "\n"
return json_str.encode(self.encoding )
def __snake_case ( self : List[str] , lowerCamelCase : BinaryIO , lowerCamelCase : str , lowerCamelCase : int , lowerCamelCase : int , **lowerCamelCase : Union[str, Any] , ) -> int:
__snake_case : Union[str, Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
__snake_case : str = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(lowerCamelCase )
else:
__snake_case , __snake_case : str = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , lowerCamelCase , lowerCamelCase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
written += file_obj.write(lowerCamelCase )
return written
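# A minimal round-trip sketch of how the two classes above are reached through the
# public `datasets` API (run from user code, not from inside this module; the
# temporary path is illustrative):
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
#   ds.to_json("/tmp/demo.jsonl")                  # dispatches to JsonDatasetWriter (JSON Lines by default)
#   loaded = Dataset.from_json("/tmp/demo.jsonl")  # dispatches to JsonDatasetReader
#   assert loaded[0] == ds[0]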
| 203 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap( checkpoint_path , enable_fusion=False ):
    model , model_cfg = create_model(
        "HTSAT-tiny" , "roberta" , checkpoint_path , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=enable_fusion , fusion_type="aff_2d" if enable_fusion else None , )
    return model, model_cfg
def rename_state_dict( state_dict ):
    model_state_dict = {}
    sequential_layers_pattern = R".*sequential.(\d+).*"
    text_projection_pattern = R".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(sequential_layers_pattern , key ):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern , key ).group(1 )
            key = key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(sequential_layer )//3}.linear.' )
        elif re.match(text_projection_pattern , key ):
            projecton_layer = int(re.match(text_projection_pattern , key ).group(1 ) )
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(F'_projection.{projecton_layer}.' , F'_projection.linear{transformers_projection_layer}.' )
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0 ) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv" , "query" )] = query_layer
            model_state_dict[key.replace("qkv" , "key" )] = key_layer
            model_state_dict[key.replace("qkv" , "value" )] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
def convert_clap_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path , enable_fusion=False ):
    clap_model , model_cfg = init_clap(checkpoint_path , enable_fusion=enable_fusion )
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict )
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config )
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict , strict=False )
    model.save_pretrained(pytorch_dump_folder_path )
    transformers_config.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_snake_case : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
_snake_case : Tuple = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
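# A self-contained sketch of the qkv split performed in rename_state_dict above:
# a fused projection of shape (3 * hidden, hidden) is cut into equal query/key/value
# thirds (hypothetical helper, not part of the conversion script; sizes illustrative).
def _demo_qkv_split(hidden: int = 4):
    mixed_qkv = torch.randn(3 * hidden , hidden )
    qkv_dim = mixed_qkv.size(0 ) // 3
    query_layer = mixed_qkv[:qkv_dim]
    key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
    value_layer = mixed_qkv[qkv_dim * 2 :]
    assert query_layer.shape == key_layer.shape == value_layer.shape == (hidden, hidden)
    return query_layer, key_layer, value_layer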
| 203 | 1 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = 'examples/'
REPLACE_PATTERNS = {
    'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    'init': 'src/transformers/__init__.py',
    'setup': 'setup.py',
}
README_FILE = 'README.md'
def update_version_in_file( fname , version , pattern ):
    with open(fname , 'r' , encoding='utf-8' , newline='\n' ) as f:
        code = f.read()
    re_pattern , replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('VERSION' , version )
    code = re_pattern.sub(replace , code )
    with open(fname , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.write(code )
def update_version_in_examples( version ):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('research_projects' )
        if "legacy" in directories:
            directories.remove('legacy' )
        for fname in fnames:
            if fname.endswith('.py' ):
                update_version_in_file(os.path.join(folder , fname ) , version , pattern='examples' )
def global_version_update( version , patch=False ):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )
def clean_main_ref_in_model_list():
    _start_prompt = '🤗 Transformers currently provides the following architectures'
    _end_prompt = '1. Want to contribute a new model?'
    with open(README_FILE , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith('1.' ):
            lines[index] = lines[index].replace(
                'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , )
        index += 1
    with open(README_FILE , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.writelines(lines )
def get_version():
    with open(REPLACE_FILES['init'] , 'r' ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['init'][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def pre_release_work( patch=False ):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
    else:
        default_version = f'{default_version.major}.{default_version.minor + 1}.0'
    # Now let's ask nicely if that's the right one.
    version = input(f'Which version are you releasing? [{default_version}]' )
    if len(version ) == 0:
        version = default_version
    print(f'Updating version to {version}.' )
    global_version_update(version , patch=patch )
    if not patch:
        print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
        clean_main_ref_in_model_list()
def post_release_work():
    current_version = get_version()
    dev_version = f'{current_version.major}.{current_version.minor + 1}.0.dev0'
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f'Which version are we developing now? [{dev_version}]' )
    if len(version ) == 0:
        version = dev_version
    print(f'Updating version to {version}.' )
    global_version_update(version )
    print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
    parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
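# A quick sanity check of the 'init' pattern above (version strings illustrative):
#   >>> pat, repl = REPLACE_PATTERNS['init']
#   >>> pat.sub(repl.replace('VERSION', '4.31.0'), '__version__ = "4.31.0.dev0"\n')
#   '__version__ = "4.31.0"\n'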
| 72 |
import argparse
import os
import re
SCREAMING_SNAKE_CASE__ : Any = """src/transformers/models/auto"""
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
SCREAMING_SNAKE_CASE__ : Union[str, Any] = re.compile(R"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""")
# re pattern that matches identifiers in mappings
SCREAMING_SNAKE_CASE__ : Optional[int] = re.compile(R"""\s*\(\s*\"(\S[^\"]+)\"""")
def _A ( lowerCamelCase , lowerCamelCase = False ):
with open(lowerCamelCase , "r" , encoding="utf-8" ) as f:
a__ : Optional[int] = f.read()
a__ : Optional[Any] = content.split("\n" )
a__ : Optional[int] = []
a__ : Optional[Any] = 0
while line_idx < len(lowerCamelCase ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
a__ : str = len(re.search(r"^(\s*)\S" , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
a__ : Union[str, Any] = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
a__ : int = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
a__ : Optional[Any] = sorted(lowerCamelCase , key=lambda lowerCamelCase : _re_identifier.search(lowerCamelCase ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write("\n".join(lowerCamelCase ) )
elif "\n".join(lowerCamelCase ) != content:
return True
def _A ( lowerCamelCase = False ):
a__ : List[str] = [os.path.join(lowerCamelCase , lowerCamelCase ) for f in os.listdir(lowerCamelCase ) if f.endswith(".py" )]
a__ : Any = [sort_auto_mapping(lowerCamelCase , overwrite=lowerCamelCase ) for fname in fnames]
if not overwrite and any(lowerCamelCase ):
a__ : Dict = [f for f, d in zip(lowerCamelCase , lowerCamelCase ) if d]
raise ValueError(
F"""The following files have auto mappings that need sorting: {", ".join(lowerCamelCase )}. Run `make style` to fix"""
" this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
    sort_all_auto_mappings(not args.check_only)
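# A quick illustration of the identifier regex above (line content illustrative):
#   >>> _re_identifier.search('        ("albert", "AlbertConfig"),').groups()[0]
#   'albert'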
| 112 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.speecht5')
MAPPING_SPEECH_ENCODER_PRENET = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
MAPPING_TEXT_ENCODER_PRENET = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
MAPPING_SPEECH_DECODER_PRENET = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
MAPPING_TEXT_DECODER_PRENET = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
MAPPING_TEXT_DECODER_POSTNET = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
MAPPING_ENCODER = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
MAPPING_DECODER = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer , attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}''')
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''')
def should_ignore( name , ignore_keys ):
    for key in ignore_keys:
        if key.endswith('.*'):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix , suffix = key.split('.*.')
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights( fairseq_dict , hf_model , task ):
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f'''Unsupported task: {task}''')
    for name, value in fairseq_dict.items():
        if should_ignore(name , IGNORE_KEYS):
            logger.info(f'''{name} was ignored''')
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_encoder , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix , suffix = key.split('.*.')
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*' , layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    elif "running_mean" in name:
                        weight_type = 'running_mean'
                    elif "running_var" in name:
                        weight_type = 'running_var'
                    elif "num_batches_tracked" in name:
                        weight_type = 'num_batches_tracked'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'''Unused weights: {unused_weights}''')
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speechta_checkpoint( task , checkpoint_path , pytorch_dump_folder_path , config_path=None , vocab_path=None , repo_id=None , ):
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(f'''Unknown task name: {task}''')
    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path , model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken('<mask>' , lstrip=True , rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'mask_token': mask_token})
        tokenizer.add_tokens(['<ctc_blank>'])
    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)
    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint['model'] , model , task)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print('Pushing to the hub...')
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--task',
default='s2t',
type=str,
help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
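# Two quick checks of the wildcard semantics implemented in should_ignore above
# (key names illustrative):
#   >>> should_ignore("text_decoder_prenet.embed_tokens.weight", ["text_decoder_prenet.*"])
#   True
#   >>> should_ignore("speecht5.encoder.layers.0.attention.k_proj.weight", ["text_decoder_prenet.*"])
#   False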
| 432 |
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MTaIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test( self ):
        model = FlaxMTaForConditionalGeneration.from_pretrained('google/mt5-small' )
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small' )
        input_ids = tokenizer('Hello there' , return_tensors='np' ).input_ids
        labels = tokenizer('Hi I am' , return_tensors='np' ).input_ids
        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id )
        logits = model(input_ids , decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1] ) ).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9_127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 432 | 1 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_multiple_size=4 , hidden_act="gelu" , hidden_dropout=0.0 , attention_dropout=0.1 , weight_tying=True , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels
    def get_config( self ):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder( self ):
        config , input_ids , input_mask , token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model( self , config , input_ids , input_mask ):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids , attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder( self , config , input_ids , input_mask ):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm( self , config , input_ids , input_mask , token_labels ):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , input_mask ):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(input_ids , attention_mask=input_mask , use_cache=True)
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2)
        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1)
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask , output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice: decoding [prefix + new] in one pass
        # must match decoding [new] on top of the cache built from [prefix]
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3))
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , input_mask , token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self , config_class=GPTNeoXJapaneseConfig , hidden_size=37)
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config , input_ids , input_mask)
    def test_model_as_decoder( self ):
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config , input_ids , input_mask)
    def test_model_as_decoder_with_default_input_mask( self ):
        # This regression test was failing with PyTorch < 1.3
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config , input_ids , input_mask)
    def test_decoder_model_past_large_inputs( self ):
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config , input_ids , input_mask)
    def test_model_for_causal_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
    @slow
    def test_generation( self ):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt , return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids , max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids , skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs , EXPECTED_OUTPUTS)
| 226 |
def solution( n = 1_0_0 ):
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2 , n ):
        for b in range(2 , n ):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow )  # adds the result to the set
    return len(collect_powers )
if __name__ == "__main__":
print('Number of terms ', solution(int(str(input()).strip())))
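# Sanity check from the Project Euler 29 statement: for 2 <= a, b <= 5 there are
# 15 distinct terms, i.e.
#   >>> solution(5)
#   15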
| 226 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {}
class LlamaConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__(self , vocab_size=3_20_00 , hidden_size=40_96 , intermediate_size=1_10_08 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=20_48 , initializer_range=0.02 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation(self ):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f'got {self.rope_scaling}' )
        rope_scaling_type = self.rope_scaling.get("type" , None )
        rope_scaling_factor = self.rope_scaling.get("factor" , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
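# The contract enforced by _rope_scaling_validation, in short (sketch):
#   LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})   # accepted
#   LlamaConfig(rope_scaling={"type": "linear", "factor": 1.0})   # ValueError: factor must be a float > 1
#   LlamaConfig(rope_scaling={"type": "exotic", "factor": 2.0})   # ValueError: type must be 'linear' or 'dynamic'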
| 48 |
"""simple docstring"""
from copy import deepcopy
class BinaryIndexedTree:
    """Binary indexed (Fenwick) tree supporting point updates and prefix-sum queries."""
    def __init__(self , arr = None , size = None ):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr )
        else:
            raise ValueError("""Either arr or size must be specified""" )
    def init(self , arr ):
        self.size = len(arr )
        self.tree = deepcopy(arr )
        for i in range(1 , self.size ):
            j = self.next_(i )
            if j < self.size:
                self.tree[j] += self.tree[i]
    def get_array(self ):
        arr = self.tree[:]
        for i in range(self.size - 1 , 0 , -1 ):
            j = self.next_(i )
            if j < self.size:
                arr[j] -= arr[i]
        return arr
    @staticmethod
    def next_(index ):
        return index + (index & (-index))
    @staticmethod
    def prev(index ):
        return index - (index & (-index))
    def add(self , index , value ):
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index )
    def update(self , index , value ):
        self.add(index , value - self.get(index ) )
    def prefix(self , right ):
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right )
        return result
    def query(self , left , right ):
        return self.prefix(right ) - self.prefix(left )
    def get(self , index ):
        return self.query(index , index + 1 )
    def rank_query(self , value ):
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
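# A quick illustration of the prefix-sum API (values illustrative):
#   >>> bit = BinaryIndexedTree(arr=[1, 2, 3, 4])
#   >>> bit.prefix(3)    # 1 + 2 + 3
#   6
#   >>> bit.update(1, 10)
#   >>> bit.query(0, 2)  # 1 + 10
#   11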
| 48 | 1 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main( args ):
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip('/' )
    target_model_path = args.target_model_path
    print(F'''Load fine-pruned model from {model_name_or_path}''' )
    model = torch.load(os.path.join(model_name_or_path , 'pytorch_model.bin' ) )
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(F'''Copied layer {name}''' )
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(F'''Copied layer {name}''' )
        elif "bias" in name:
            pruned_model[name] = tensor
            print(F'''Copied layer {name}''' )
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor , threshold=threshold )
                pruned_model[name] = tensor * mask
                print(F'''Pruned layer {name}''' )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F'''{prefix_}mask_scores''']
                mask = TopKBinarizer.apply(scores , threshold )
                pruned_model[name] = tensor * mask
                print(F'''Pruned layer {name}''' )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F'''{prefix_}mask_scores''']
                mask = ThresholdBinarizer.apply(scores , threshold , True )
                pruned_model[name] = tensor * mask
                print(F'''Pruned layer {name}''' )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F'''{prefix_}mask_scores''']
                l , r = -0.1, 1.1
                s = torch.sigmoid(scores )
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0 , max=1.0 )
                pruned_model[name] = tensor * mask
                print(F'''Pruned layer {name}''' )
            else:
                raise ValueError('Unknown pruning method' )
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path ) , F'''bertarized_{os.path.basename(model_name_or_path )}''' )
    if not os.path.isdir(target_model_path ):
        shutil.copytree(model_name_or_path , target_model_path )
        print(F'''\nCreated folder {target_model_path}''' )
    torch.save(pruned_model , os.path.join(target_model_path , 'pytorch_model.bin' ) )
    print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'
'Not needed for `l0`'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
help='Folder containing the model that was previously fine-pruned',
)
    args = parser.parse_args()
main(args)
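# A sketch of what magnitude pruning does at a given remaining-weights level
# (hypothetical helper, not part of the original script; values illustrative):
def _demo_magnitude_pruning(threshold: float = 0.5) -> torch.Tensor:
    tensor = torch.tensor([0.5, -0.05, 0.01, -0.9])
    k = int(threshold * tensor.numel())  # number of weights to keep
    cutoff = tensor.abs().kthvalue(tensor.numel() - k + 1).values
    mask = (tensor.abs() >= cutoff).float()
    return tensor * mask  # tensor([ 0.5000, -0.0000,  0.0000, -0.9000])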
| 57 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"""tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
"""tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
"""base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
"""base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
"""small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
"""small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
"""medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
"""medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
"""large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
"""large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def remove_ignore_keys_( state_dict ):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k , None )
WHISPER_MAPPING = {
"""blocks""": """layers""",
"""mlp.0""": """fc1""",
"""mlp.2""": """fc2""",
"""mlp_ln""": """final_layer_norm""",
""".attn.query""": """.self_attn.q_proj""",
""".attn.key""": """.self_attn.k_proj""",
""".attn.value""": """.self_attn.v_proj""",
""".attn_ln""": """.self_attn_layer_norm""",
""".attn.out""": """.self_attn.out_proj""",
""".cross_attn.query""": """.encoder_attn.q_proj""",
""".cross_attn.key""": """.encoder_attn.k_proj""",
""".cross_attn.value""": """.encoder_attn.v_proj""",
""".cross_attn_ln""": """.encoder_attn_layer_norm""",
""".cross_attn.out""": """.encoder_attn.out_proj""",
"""decoder.ln.""": """decoder.layer_norm.""",
"""encoder.ln.""": """encoder.layer_norm.""",
"""token_embedding""": """embed_tokens""",
"""encoder.positional_embedding""": """encoder.embed_positions.weight""",
"""decoder.positional_embedding""": """decoder.embed_positions.weight""",
"""ln_post""": """layer_norm""",
}
def rename_keys( s_dict ):
    keys = list(s_dict.keys() )
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k , v )
        print(f"{key} -> {new_key}" )
        s_dict[new_key] = s_dict.pop(key )
    return s_dict
def make_linear_from_emb( emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download( url , root="." ) -> bytes:
    # the default download root "." is an assumption made while repairing this
    # snippet, so the single-argument call in convert_openai_whisper_to_tfms stays valid
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    expected_sha256 = url.split("/" )[-2]
    download_target = os.path.join(root , filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(f"{download_target} exists and is not a regular file" )
    if os.path.isfile(download_target ):
        model_bytes = open(download_target , "rb" ).read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" )
    with urllib.request.urlopen(url ) as source, open(download_target , "wb" ) as output:
        with tqdm(
            total=int(source.info().get("Content-Length" ) ) , ncols=80 , unit="iB" , unit_scale=True , unit_divisor=1_024 ) as loop:
            while True:
                buffer = source.read(8_192 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    model_bytes = open(download_target , "rb" ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model." )
    return model_bytes
def convert_openai_whisper_to_tfms( checkpoint_path , pytorch_dump_folder_path ):
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path] )
    else:
        original_checkpoint = torch.load(checkpoint_path , map_location="cpu" )
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"] , encoder_ffn_dim=ffn_dim , decoder_ffn_dim=ffn_dim , num_mel_bins=dimensions["n_mels"] , d_model=dimensions["n_audio_state"] , max_target_positions=dimensions["n_text_ctx"] , encoder_layers=dimensions["n_audio_layer"] , encoder_attention_heads=dimensions["n_audio_head"] , decoder_layers=dimensions["n_text_layer"] , decoder_attention_heads=dimensions["n_text_state"] , max_source_positions=dimensions["n_audio_ctx"] , )
    model = WhisperForConditionalGeneration(config )
    missing , unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}" )
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Patht to the downloaded checkpoints""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
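# A quick look at what rename_keys does to one fairseq-style key (key illustrative):
#   "decoder.blocks.0.mlp.0.weight"
#       -> "decoder.layers.0.fc1.weight"
# via the "blocks" -> "layers" and "mlp.0" -> "fc1" entries of WHISPER_MAPPING.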
| 336 | 0 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy( x ):
    """Calculate the entropy of a pre-softmax logit tensor."""
    exp_x = torch.exp(x )
    A = torch.sum(exp_x , dim=1 )  # sum of exp(x_i)
    B = torch.sum(x * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
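# A small self-check of the identity behind `entropy` (hypothetical helper, not
# part of the original module): with p = softmax(x), log(A) - B/A == -sum(p * log p).
def _check_entropy_formula() -> None:
    x = torch.tensor([[1.0, 2.0, 3.0]])
    p = torch.softmax(x , dim=1 )
    assert torch.allclose(entropy(x ) , -(p * p.log()).sum(dim=1 ) )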
class DeeBertEncoder(nn.Module ):
    def __init__( self , config ):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config ) for _ in range(config.num_hidden_layers )] )
        self.highway = nn.ModuleList([BertHighway(config ) for _ in range(config.num_hidden_layers )] )
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers )]
    def set_early_exit_entropy( self , x ):
        if (type(x ) is float) or (type(x ) is int):
            for i in range(len(self.early_exit_entropy ) ):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x
    def init_highway_pooler( self , pooler ):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name] )
    def forward( self , hidden_states , attention_mask=None , head_mask=None , encoder_hidden_states=None , encoder_attention_mask=None , ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer ):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states , attention_mask , head_mask[i] , encoder_hidden_states , encoder_attention_mask )
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)
            highway_exit = self.highway[i](current_outputs )
            # logits, pooled_output
            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits )
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)
                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output , i + 1 )
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()
    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}."""
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]

        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[1:]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    """A highway (early-exit) head: pools one intermediate layer's output and classifies it."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    #  We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
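
# Inference-time usage sketch (illustrative; the 0.5 threshold is an assumed value):
#
#   model.bert.encoder.set_early_exit_entropy(0.5)
#
# During eval, any highway whose prediction entropy drops below 0.5 raises
# HighwayException inside DeeBertEncoder.forward; DeeBertForSequenceClassification
# catches it above and returns that exit's logits together with the exiting layer
# index, so later (more expensive) layers are never computed for easy inputs.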
| 709 |
from __future__ import annotations
def median_of_two_arrays(numsa: list[float], numsb: list[float]) -> float:
    """Return the median of the merged, sorted arrays.

    >>> median_of_two_arrays([1.0, 3.0], [2.0])
    2.0
    >>> median_of_two_arrays([1.0, 2.0], [3.0, 4.0])
    2.5
    """
    all_numbers = sorted(numsa + numsb)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_a, array_b)}")
| 581 | 0 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass
def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}
def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
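
# Launch sketch (assumed invocation; RANK and WORLD_SIZE are set by the launcher):
#
#   torchrun --nproc_per_node=2 this_script.py --num_workers 0
#
# With NUM_SHARDS=4 and NUM_ITEMS_PER_SHARD=3 the full dataset has 12 rows, so
# each of the 2 ranks must iterate exactly 6 of them for the check above to pass.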
| 447 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape the tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
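
# Why the conv transpose above uses (2, 3, 1, 0): PyTorch stores conv kernels as
# (out_channels, in_channels, height, width) while Flax expects
# (height, width, in_channels, out_channels). Quick shape check (illustrative):
#
# import numpy as np
# pt_kernel = np.zeros((64, 3, 7, 7))            # OIHW, as saved by PyTorch
# flax_kernel = pt_kernel.transpose(2, 3, 1, 0)  # -> (7, 7, 3, 64), i.e. HWIO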
| 678 | 0 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Remove unused keys (e.g. the auxiliary segmentation head)."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task',
default='imagenet1k_256',
type=str,
help=(
'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '
'\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
),
choices=[
'imagenet1k_256',
'imagenet1k_384',
'imagenet21k_to_1k_256',
'imagenet21k_to_1k_384',
'ade20k_deeplabv3',
'voc_deeplabv3',
],
)
parser.add_argument(
'--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
    convert_mobilevitv2_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
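
# Invocation sketch (illustrative; the file paths are placeholders for a
# downloaded MobileViTV2 checkpoint and its YAML config):
#
#   python convert_mlcvnets_to_pytorch.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path mobilevitv2-1.0.pt \
#       --orig_config_path mobilevitv2-1.0.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k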
| 9 |
from math import factorial

DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}
def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))
def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0"
        )

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution()}')
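
# Worked example: 145 is a fixed point of the digit-factorial map, since
# 1! + 4! + 5! = 1 + 24 + 120 = 145, so its chain has length 1. By contrast,
# 169 -> 363601 -> 1454 -> 169 forms a loop of three distinct terms.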
| 9 | 1 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 326 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
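
# Invocation sketch (illustrative; paths are placeholders):
#
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./funnel/model.ckpt \
#       --config_file ./funnel/config.json \
#       --pytorch_dump_path ./funnel/pytorch_model.bin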
| 326 | 1 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Finds the root of `func` starting from the point `a` by the Newton-Raphson method."""
    x = a
    while True:
        x = Decimal(x) - (Decimal(eval(func)) / Decimal(eval(str(diff(func)))))  # noqa: S307
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Find value of e (the root of log(x) - 1 = 0)
print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
# Exponential Roots
print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
| 702 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Returns a mapping from utf-8 bytes to printable unicode strings, avoiding
    whitespace/control characters that the BPE code cannot handle."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
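
# Usage note for the `_pad` override above (illustrative): LED expects a
# `global_attention_mask` alongside `input_ids`, where 1 marks globally-attending
# tokens and 0 local ones. When a batch is padded, the mask is extended with -1
# (not 0, since 0 still means "attend locally") so it stays aligned with
# `input_ids`.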
| 25 | 0 |
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )

        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key), Write(key_text))

        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3

            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
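
# Render sketch (assumed manim CLI invocation; flags: -p previews the result,
# -ql renders at low quality for quick iteration):
#
#   manim -pql this_file.py Stage1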
| 659 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_mobilebert": [
        "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileBertConfig",
        "MobileBertOnnxConfig",
    ],
    "tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
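
# Note on the pattern above (not in the original file): at import time only the
# `_import_structure` mapping is built; `_LazyModule` replaces this module in
# `sys.modules` and resolves names like `MobileBertModel` on first attribute
# access, which keeps importing the package cheap when torch or TF are unused.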
| 481 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        activation_dropout=0.0,
        attention_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
def a__ ( self , _lowercase , _lowercase = -1 , _lowercase = -1 , _lowercase = False , _lowercase = None , ) -> Mapping[str, Any]:
_lowerCamelCase : List[Any] = self._generate_dummy_inputs_for_encoder_and_decoder(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_lowerCamelCase, _lowerCamelCase : Optional[Any] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
_lowerCamelCase : Union[str, Any] = seqlen + 2
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = self.num_layers
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = self.num_attention_heads
_lowerCamelCase : Optional[Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCamelCase : Union[str, Any] = common_inputs['''attention_mask'''].dtype
_lowerCamelCase : str = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(_lowercase , _lowercase , dtype=_lowercase )] , dim=1 )
_lowerCamelCase : int = [
(torch.zeros(_lowercase ), torch.zeros(_lowercase )) for _ in range(_lowercase )
]
return common_inputs
def a__ ( self , _lowercase , _lowercase = -1 , _lowercase = -1 , _lowercase = False , _lowercase = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowerCamelCase : int = compute_effective_axis_dimension(
_lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowerCamelCase : Any = tokenizer.num_special_tokens_to_add(_lowercase )
_lowerCamelCase : Any = compute_effective_axis_dimension(
_lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowercase )
# Generate dummy inputs according to compute batch and sequence
_lowerCamelCase : int = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowerCamelCase : str = dict(tokenizer(_lowercase , return_tensors=_lowercase ) )
return common_inputs
def a__ ( self , _lowercase , _lowercase = -1 , _lowercase = -1 , _lowercase = False , _lowercase = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowerCamelCase : Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
else:
_lowerCamelCase : Union[str, Any] = self._generate_dummy_inputs_for_causal_lm(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
return common_inputs
def a__ ( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowerCamelCase : List[str] = super()._flatten_past_key_values_(_lowercase , _lowercase , _lowercase , _lowercase )
else:
_lowerCamelCase : Optional[Any] = super(_lowercase , self )._flatten_past_key_values_(
_lowercase , _lowercase , _lowercase , _lowercase )
@property
def a__ ( self ) -> float:
return 1E-4
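# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original module). The
# `share_encoder_decoder_embeddings` field above suggests a Marian-style
# seq2seq config; the checkpoint name and the de-obfuscated class name below
# are assumptions.
#
#   from transformers import AutoConfig, AutoTokenizer, TensorType
#
#   model_config = AutoConfig.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   onnx_config = MarianOnnxConfig(model_config, task="seq2seq-lm")  # class above
#   tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#   # `dummy` plus onnx_config.inputs / onnx_config.outputs drive torch.onnx.export.
# ---------------------------------------------------------------------------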
| 558 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class _UpperCAmelCase ( a_ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=None , _lowercase=None , _lowercase=0 ) -> List[Any]:
_lowerCamelCase : Tuple = 1.0 if scale is None else scale
_lowerCamelCase : int = 0.0 if loc is None else loc
super().__init__(_lowercase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=_lowercase )] )
@property
def a__ ( self ) -> Dict:
return self.base_dist.mean * self.scale + self.loc
@property
def a__ ( self ) -> List[str]:
return self.base_dist.variance * self.scale**2
@property
def a__ ( self ) -> Union[str, Any]:
return self.variance.sqrt()
class _UpperCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , **_lowercase ) -> None:
super().__init__(**_lowercase )
_lowerCamelCase : Union[str, Any] = args_dim
_lowerCamelCase : Union[str, Any] = nn.ModuleList([nn.Linear(_lowercase , _lowercase ) for dim in args_dim.values()] )
_lowerCamelCase : str = domain_map
def a__ ( self , _lowercase ) -> Tuple[torch.Tensor]:
_lowerCamelCase : Any = [proj(_lowercase ) for proj in self.proj]
return self.domain_map(*_lowercase )
class _UpperCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , _lowercase ) -> Union[str, Any]:
super().__init__()
_lowerCamelCase : Optional[Any] = function
def a__ ( self , _lowercase , *_lowercase ) -> str:
return self.function(_lowercase , *_lowercase )
class _UpperCAmelCase :
"""simple docstring"""
__snake_case = 42
__snake_case = 42
__snake_case = 42
def __init__( self , _lowercase = 1 ) -> None:
_lowerCamelCase : int = dim
_lowerCamelCase : Optional[int] = {k: dim * self.args_dim[k] for k in self.args_dim}
def a__ ( self , _lowercase ) -> Dict:
if self.dim == 1:
return self.distribution_class(*_lowercase )
else:
return Independent(self.distribution_class(*_lowercase ) , 1 )
def a__ ( self , _lowercase , _lowercase = None , _lowercase = None , ) -> Distribution:
_lowerCamelCase : Any = self._base_distribution(_lowercase )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(_lowercase , loc=_lowercase , scale=_lowercase , event_dim=self.event_dim )
@property
def a__ ( self ) -> Tuple:
return () if self.dim == 1 else (self.dim,)
@property
def a__ ( self ) -> int:
return len(self.event_shape )
@property
def a__ ( self ) -> float:
return 0.0
def a__ ( self , _lowercase ) -> nn.Module:
return ParameterProjection(
in_features=_lowercase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def a__ ( self , *_lowercase ) -> int:
raise NotImplementedError()
@staticmethod
def a__ ( _lowercase ) -> torch.Tensor:
return (x + torch.sqrt(torch.square(_lowercase ) + 4.0 )) / 2.0
class _UpperCAmelCase ( a_ ):
"""simple docstring"""
__snake_case = {"df": 1, "loc": 1, "scale": 1}
__snake_case = StudentT
@classmethod
def a__ ( cls , _lowercase , _lowercase , _lowercase ) -> List[Any]:
_lowerCamelCase : int = cls.squareplus(_lowercase ).clamp_min(torch.finfo(scale.dtype ).eps )
_lowerCamelCase : List[Any] = 2.0 + cls.squareplus(_lowercase )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class _UpperCAmelCase ( a_ ):
"""simple docstring"""
__snake_case = {"loc": 1, "scale": 1}
__snake_case = Normal
@classmethod
def a__ ( cls , _lowercase , _lowercase ) -> List[Any]:
_lowerCamelCase : str = cls.squareplus(_lowercase ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class _UpperCAmelCase ( a_ ):
"""simple docstring"""
__snake_case = {"total_count": 1, "logits": 1}
__snake_case = NegativeBinomial
@classmethod
def a__ ( cls , _lowercase , _lowercase ) -> int:
_lowerCamelCase : str = cls.squareplus(_lowercase )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def a__ ( self , _lowercase ) -> Distribution:
_lowerCamelCase, _lowerCamelCase : int = distr_args
if self.dim == 1:
return self.distribution_class(total_count=_lowercase , logits=_lowercase )
else:
return Independent(self.distribution_class(total_count=_lowercase , logits=_lowercase ) , 1 )
def a__ ( self , _lowercase , _lowercase = None , _lowercase = None ) -> Distribution:
_lowerCamelCase, _lowerCamelCase : Optional[int] = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
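# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; uses the upstream transformers names
# StudentTOutput / get_parameter_projection / distribution for the classes
# above). A distribution output maps raw network features to constrained
# parameters and then to a torch Distribution:
#
#   output = StudentTOutput(dim=1)
#   projection = output.get_parameter_projection(in_features=32)
#   features = torch.randn(8, 24, 32)         # (batch, time, features)
#   distr_args = projection(features)         # (df, loc, scale), each (8, 24)
#   distr = output.distribution(distr_args)   # StudentT with batch shape (8, 24)
#   sample = distr.sample()
# ---------------------------------------------------------------------------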
| 558 | 1 |
import argparse
import copy
def generate_neighbours(path):
    """Parse an edge-list file into a dict mapping each node to its [neighbour, distance] pairs."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    """Build a greedy nearest-neighbour round trip starting from the first node in the file."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    # Replace the 10000 sentinel counted for the closing edge with its real distance.
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    """Return all 2-swap neighbours of `solution`, each with its total distance appended, sorted by that distance."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Run tabu search for `iters` iterations with a tabu list holding at most `size` recent swaps."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
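# ---------------------------------------------------------------------------
# Input format note (illustrative): the data file is an edge list with one
# "node_a node_b distance" triple per line, and generate_first_solution takes
# the very first character of the file as the start node (the f.read(1) call).
#
#   a b 20
#   a c 18
#   b c 10
#
# Example run:  python tabu_search.py -f tabudata.txt -i 4 -s 3
# ---------------------------------------------------------------------------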
| 1 |
"""Solve the n-queens problem with backtracking, printing every valid placement."""
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if a queen at (row, column) is not attacked by any queen already placed."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row; print and record every complete placement."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """Print the board with 'Q' for queens and '.' for empty squares."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
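# For n = 8 the backtracking search prints all 92 distinct placements, so the
# final count is 92. (Note that `solution` stores references to the one board,
# which is all zeros again after the search unwinds; only its length matters.)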
| 460 | 0 |
"""simple docstring"""
encode_dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    """Encode a string of letters and spaces with the Baconian substitution table above."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decode a Baconian-encoded string consisting only of 'A', 'B' and spaces."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
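# Round-trip example (illustrative): every letter becomes five 'A'/'B'
# symbols and spaces are preserved, so decode() inverts encode():
#
#   decode(encode("flee at once")) == "flee at once"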
| 12 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=0 ) -> Any:
# Format the message.
if name is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
else:
SCREAMING_SNAKE_CASE__ : str = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}"""
SCREAMING_SNAKE_CASE__ : Dict = fmt.format(__lowerCAmelCase )
# Print and recurse (if needed).
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
if msg is not None:
print(__lowerCAmelCase )
for k in val.keys():
recursive_print(__lowerCAmelCase , val[k] , spaces + 2 )
elif isinstance(__lowerCAmelCase , torch.Tensor ):
print(__lowerCAmelCase , """:""" , val.size() )
else:
print(__lowerCAmelCase , """:""" , __lowerCAmelCase )
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
SCREAMING_SNAKE_CASE__ : Tuple = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
SCREAMING_SNAKE_CASE__ : int = (num_heads, hidden_size, num_splits) + input_shape[1:]
SCREAMING_SNAKE_CASE__ : List[str] = param.view(*__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = param.transpose(0 , 2 )
SCREAMING_SNAKE_CASE__ : List[Any] = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
SCREAMING_SNAKE_CASE__ : List[str] = (num_heads, num_splits, hidden_size) + input_shape[1:]
SCREAMING_SNAKE_CASE__ : Dict = param.view(*__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : int = param.transpose(0 , 1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Any = param.view(*__lowerCAmelCase )
return param
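# Shape walk-through for checkpoint_version >= 2.0 (illustrative): a fused QKV
# weight stored as [num_heads * num_splits * hidden_size, :] is viewed as
# (num_heads, num_splits, hidden_size, ...), the first two axes are swapped to
# (num_splits, num_heads, hidden_size, ...), and the final view flattens it
# back to [num_splits * num_heads * hidden_size, :], the layout later
# Megatron-LM versions (and the GPT-2 conversion below) expect.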
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
# The converted output model.
SCREAMING_SNAKE_CASE__ : List[str] = {}
# old versions did not store training args
SCREAMING_SNAKE_CASE__ : List[str] = input_state_dict.get("""args""" , __lowerCAmelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
SCREAMING_SNAKE_CASE__ : List[Any] = ds_args.padded_vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] = ds_args.max_position_embeddings
SCREAMING_SNAKE_CASE__ : List[Any] = ds_args.hidden_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = ds_args.num_layers
SCREAMING_SNAKE_CASE__ : Dict = ds_args.num_attention_heads
SCREAMING_SNAKE_CASE__ : List[str] = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
SCREAMING_SNAKE_CASE__ : List[str] = config.n_head
# The hidden_size per head.
SCREAMING_SNAKE_CASE__ : str = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_state_dict["""checkpoint_version"""]
else:
SCREAMING_SNAKE_CASE__ : Tuple = 0.0
# The model.
SCREAMING_SNAKE_CASE__ : Any = input_state_dict["""model"""]
# The language model.
SCREAMING_SNAKE_CASE__ : Any = model["""language_model"""]
# The embeddings.
SCREAMING_SNAKE_CASE__ : str = lm["""embedding"""]
# The word embeddings.
SCREAMING_SNAKE_CASE__ : int = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
SCREAMING_SNAKE_CASE__ : Any = word_embeddings[: config.vocab_size, :]
SCREAMING_SNAKE_CASE__ : Optional[int] = word_embeddings
# The position embeddings.
SCREAMING_SNAKE_CASE__ : Any = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
SCREAMING_SNAKE_CASE__ : Tuple = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
SCREAMING_SNAKE_CASE__ : List[Any] = pos_embeddings
# The transformer.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
SCREAMING_SNAKE_CASE__ : str = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
SCREAMING_SNAKE_CASE__ : str = layer_re.match(__lowerCAmelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
SCREAMING_SNAKE_CASE__ : Dict = int(m.group(1 ) )
# The name of the operation.
SCREAMING_SNAKE_CASE__ : Optional[Any] = m.group(2 )
# Is it a weight or a bias?
SCREAMING_SNAKE_CASE__ : str = m.group(3 )
# The name of the layer.
SCREAMING_SNAKE_CASE__ : List[Any] = F'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
SCREAMING_SNAKE_CASE__ : Dict = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
SCREAMING_SNAKE_CASE__ : List[Any] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
SCREAMING_SNAKE_CASE__ : Any = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , __lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : str = causal_mask
# Insert a "dummy" tensor for masked_bias.
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor(-1E4 , dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : List[str] = masked_bias
SCREAMING_SNAKE_CASE__ : List[str] = fix_query_key_value_ordering(__lowerCAmelCase , __lowerCAmelCase , 3 , __lowerCAmelCase , __lowerCAmelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
SCREAMING_SNAKE_CASE__ : str = out_val.transpose(0 , 1 ).contiguous()
# Store.
SCREAMING_SNAKE_CASE__ : Dict = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
SCREAMING_SNAKE_CASE__ : Any = fix_query_key_value_ordering(__lowerCAmelCase , __lowerCAmelCase , 3 , __lowerCAmelCase , __lowerCAmelCase )
# Store. No change of shape.
SCREAMING_SNAKE_CASE__ : str = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
SCREAMING_SNAKE_CASE__ : str = megatron_to_transformers[op_name]
SCREAMING_SNAKE_CASE__ : int = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
SCREAMING_SNAKE_CASE__ : int = megatron_to_transformers[op_name]
SCREAMING_SNAKE_CASE__ : Dict = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = transformer["""final_layernorm.weight"""]
SCREAMING_SNAKE_CASE__ : str = transformer["""final_layernorm.bias"""]
    # For the LM head, transformers wants the matrix tied to the word embeddings.
SCREAMING_SNAKE_CASE__ : Tuple = word_embeddings
# It should be done!
return output_state_dict
def _lowercase ( ) -> List[Any]:
# Create the argument parser.
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" , type=__lowerCAmelCase , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
parser.add_argument(
"""--config_file""" , default="""""" , type=__lowerCAmelCase , help="""An optional config json file describing the pre-trained model.""" , )
SCREAMING_SNAKE_CASE__ : Dict = parser.parse_args()
# Extract the basename.
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
SCREAMING_SNAKE_CASE__ : List[Any] = torch.load(__lowerCAmelCase , map_location="""cpu""" )
else:
SCREAMING_SNAKE_CASE__ : str = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
SCREAMING_SNAKE_CASE__ : int = input_state_dict.get("""args""" , __lowerCAmelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
SCREAMING_SNAKE_CASE__ : Dict = """gelu_fast"""
elif ds_args.openai_gelu:
SCREAMING_SNAKE_CASE__ : Optional[Any] = """gelu_new"""
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
SCREAMING_SNAKE_CASE__ : Any = """gelu_new"""
# Spell out all parameters in case the defaults change.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = GPTaConfig(
vocab_size=5_0257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=__lowerCAmelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=__lowerCAmelCase , summary_activation=__lowerCAmelCase , summary_proj_to_labels=__lowerCAmelCase , summary_first_dropout=0.1 , scale_attn_weights=__lowerCAmelCase , use_cache=__lowerCAmelCase , bos_token_id=5_0256 , eos_token_id=5_0256 , )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = GPTaConfig.from_json_file(args.config_file )
SCREAMING_SNAKE_CASE__ : Tuple = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = convert_megatron_checkpoint(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(__lowerCAmelCase , __lowerCAmelCase )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
SCREAMING_SNAKE_CASE__ : Tuple = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
SCREAMING_SNAKE_CASE__ : Any = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
SCREAMING_SNAKE_CASE__ : Any = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """gpt2"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoTokenizer.from_pretrained(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = type(__lowerCAmelCase ).__name__
SCREAMING_SNAKE_CASE__ : Dict = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(__lowerCAmelCase )
# Save tokenizer based on args
print(F'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(__lowerCAmelCase )
# Store the state_dict to file.
SCREAMING_SNAKE_CASE__ : Any = os.path.join(__lowerCAmelCase , """pytorch_model.bin""" )
print(F'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(__lowerCAmelCase , __lowerCAmelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
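# Example invocation (illustrative; the path is a placeholder):
#
#   python convert_megatron_gpt2_checkpoint.py \
#       --print-checkpoint-structure \
#       /path/to/checkpoint/release/mp_rank_00/model_optim_rng.pt
#
# config.json, tokenizer files and pytorch_model.bin are written next to the
# checkpoint (see the `basename` extracted in main()).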
| 12 | 1 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=3 , lowerCamelCase=32 , lowerCamelCase=3 , lowerCamelCase=10 , lowerCamelCase=[10, 20, 30, 40] , lowerCamelCase=[1, 1, 2, 1] , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase="relu" , lowerCamelCase=3 , lowerCamelCase=None , ):
__a = parent
__a = batch_size
__a = image_size
__a = num_channels
__a = embeddings_size
__a = hidden_sizes
__a = depths
__a = is_training
__a = use_labels
__a = hidden_act
__a = num_labels
__a = scope
__a = len(lowerCamelCase )
def a__ ( self ):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.num_labels )
__a = self.get_config()
return config, pixel_values, labels
def a__ ( self ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = TFRegNetModel(config=lowerCamelCase )
__a = model(lowerCamelCase , training=lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = self.num_labels
__a = TFRegNetForImageClassification(lowerCamelCase )
__a = model(lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self ):
__a = self.prepare_config_and_inputs()
__a , __a , __a = config_and_inputs
__a = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class snake_case__ ( snake_case_, snake_case_, unittest.TestCase ):
_snake_case : List[str] = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
_snake_case : List[str] = (
{"""feature-extraction""": TFRegNetModel, """image-classification""": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
_snake_case : Optional[Any] = False
_snake_case : Any = False
_snake_case : Optional[int] = False
_snake_case : Optional[int] = False
_snake_case : int = False
def a__ ( self ):
__a = TFRegNetModelTester(self )
__a = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase )
def a__ ( self ):
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def a__ ( self ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def a__ ( self ):
super().test_keras_fit()
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def a__ ( self ):
pass
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(lowerCamelCase )
__a = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def a__ ( self ):
def check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = model_class(lowerCamelCase )
__a = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) , training=lowerCamelCase )
__a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__a = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
__a = layer_type
__a = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def a__ ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase={} ):
__a = model(lowerCamelCase , return_dict=lowerCamelCase , **lowerCamelCase )
__a = model(lowerCamelCase , return_dict=lowerCamelCase , **lowerCamelCase ).to_tuple()
def recursive_check(lowerCamelCase , lowerCamelCase ):
if isinstance(lowerCamelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowerCamelCase , lowerCamelCase ):
recursive_check(lowerCamelCase , lowerCamelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(lowerCamelCase , lowerCamelCase ) ) , msg=(
"Tuple and dict output are not equal. Difference:"
F" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"
) , )
recursive_check(lowerCamelCase , lowerCamelCase )
for model_class in self.all_model_classes:
__a = model_class(lowerCamelCase )
__a = self._prepare_for_class(lowerCamelCase , lowerCamelCase )
__a = self._prepare_for_class(lowerCamelCase , lowerCamelCase )
check_equivalence(lowerCamelCase , lowerCamelCase , lowerCamelCase )
__a = self._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
__a = self._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
check_equivalence(lowerCamelCase , lowerCamelCase , lowerCamelCase )
__a = self._prepare_for_class(lowerCamelCase , lowerCamelCase )
__a = self._prepare_for_class(lowerCamelCase , lowerCamelCase )
check_equivalence(lowerCamelCase , lowerCamelCase , lowerCamelCase , {"output_hidden_states": True} )
__a = self._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
__a = self._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
check_equivalence(lowerCamelCase , lowerCamelCase , lowerCamelCase , {"output_hidden_states": True} )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@slow
def a__ ( self ):
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = TFRegNetModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def _lowerCamelCase( ):
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class snake_case__ ( unittest.TestCase ):
@cached_property
def a__ ( self ):
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def a__ ( self ):
__a = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors="tf" )
# forward pass
__a = model(**lowerCamelCase , training=lowerCamelCase )
# verify the logits
__a = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
__a = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , lowerCamelCase , atol=1E-4 )
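# To run this suite from a transformers checkout (illustrative path; setting
# RUN_SLOW=1 additionally enables the @slow integration test above):
#
#   RUN_SLOW=1 pytest tests/models/regnet/test_modeling_tf_regnet.py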
| 528 |
"""simple docstring"""
def _lowerCamelCase( a ):
return " ".join(
"".join(word[::-1] ) if len(a ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
| 528 | 1 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class __UpperCAmelCase ( _UpperCamelCase ):
__lowerCamelCase : List[str] = "sew"
def __init__( self : List[str] , a_ : List[str]=32 , a_ : List[str]=7_68 , a_ : Dict=12 , a_ : Tuple=12 , a_ : Optional[Any]=30_72 , a_ : Optional[Any]=2 , a_ : Union[str, Any]="gelu" , a_ : Dict=0.1 , a_ : Dict=0.1 , a_ : Tuple=0.1 , a_ : int=0.0 , a_ : Tuple=0.1 , a_ : Tuple=0.1 , a_ : Tuple=0.02 , a_ : str=1E-5 , a_ : int="group" , a_ : Dict="gelu" , a_ : Tuple=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , a_ : str=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , a_ : Any=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , a_ : Optional[Any]=False , a_ : Optional[int]=1_28 , a_ : Optional[int]=16 , a_ : str=True , a_ : Optional[int]=0.05 , a_ : str=10 , a_ : Dict=2 , a_ : Union[str, Any]=0.0 , a_ : Dict=10 , a_ : str=0 , a_ : Dict="mean" , a_ : Any=False , a_ : List[str]=False , a_ : Optional[int]=2_56 , a_ : List[Any]=0 , a_ : Tuple=1 , a_ : Union[str, Any]=2 , **a_ : Optional[Any] , ) -> Dict:
'''simple docstring'''
super().__init__(**a_ , pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ )
a__ : Any = hidden_size
a__ : Optional[int] = feat_extract_norm
a__ : Dict = feat_extract_activation
a__ : str = list(a_ )
a__ : Tuple = list(a_ )
a__ : List[Any] = list(a_ )
a__ : Union[str, Any] = conv_bias
a__ : Dict = num_conv_pos_embeddings
a__ : Dict = num_conv_pos_embedding_groups
a__ : int = len(self.conv_dim )
a__ : Any = num_hidden_layers
a__ : Optional[Any] = intermediate_size
a__ : int = squeeze_factor
a__ : List[str] = hidden_act
a__ : List[str] = num_attention_heads
a__ : List[Any] = hidden_dropout
a__ : str = attention_dropout
a__ : Optional[Any] = activation_dropout
a__ : Any = feat_proj_dropout
a__ : Optional[int] = final_dropout
a__ : Optional[int] = layerdrop
a__ : List[str] = layer_norm_eps
a__ : int = initializer_range
a__ : List[Any] = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
F"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"
F"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a__ : Optional[int] = apply_spec_augment
a__ : Tuple = mask_time_prob
a__ : Tuple = mask_time_length
a__ : int = mask_time_min_masks
a__ : Any = mask_feature_prob
a__ : Dict = mask_feature_length
a__ : Optional[int] = mask_feature_min_masks
# ctc loss
a__ : int = ctc_loss_reduction
a__ : List[str] = ctc_zero_infinity
# sequence classification
a__ : Union[str, Any] = use_weighted_layer_sum
a__ : List[str] = classifier_proj_size
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
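# Usage sketch (illustrative; assumes the de-obfuscated class name SEWConfig):
# with the default conv_stride of (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), the
# property above multiplies the strides into a total feature-extractor stride
# of 5 * 2**6 = 320 input samples per output frame.
#
#   config = SEWConfig()
#   config.inputs_to_logits_ratio   # 320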
| 709 |
"""simple docstring"""
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def lowercase__ ( lowerCAmelCase__ : str ) -> str:
'''simple docstring'''
return EnvironmentCommand()
class __UpperCAmelCase ( _UpperCamelCase ):
@staticmethod
def UpperCAmelCase ( a_ : ArgumentParser ) -> List[str]:
'''simple docstring'''
a__ : List[Any] = parser.add_parser("env" )
download_parser.set_defaults(func=a_ )
def UpperCAmelCase ( self : Optional[Any] ) -> str:
'''simple docstring'''
a__ : Tuple = huggingface_hub.__version__
a__ : Optional[Any] = "not installed"
a__ : List[str] = "NA"
if is_torch_available():
import torch
a__ : List[str] = torch.__version__
a__ : List[str] = torch.cuda.is_available()
a__ : Union[str, Any] = "not installed"
if is_transformers_available():
import transformers
a__ : List[Any] = transformers.__version__
a__ : str = "not installed"
if is_accelerate_available():
import accelerate
a__ : List[Any] = accelerate.__version__
a__ : str = "not installed"
if is_xformers_available():
import xformers
a__ : List[Any] = xformers.__version__
a__ : List[Any] = {
"`diffusers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"PyTorch version (GPU?)": F"{pt_version} ({pt_cuda_available})",
"Huggingface_hub version": hub_version,
"Transformers version": transformers_version,
"Accelerate version": accelerate_version,
"xFormers version": xformers_version,
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
print(self.format_dict(a_ ) )
return info
@staticmethod
def UpperCAmelCase ( a_ : Tuple ) -> Optional[int]:
'''simple docstring'''
return "\n".join([F"- {prop}: {val}" for prop, val in d.items()] ) + "\n"
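# The command class above is registered under the `diffusers-cli` entry point,
# so the environment report is printed with:
#
#   diffusers-cli env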
| 251 | 0 |
import torch
from transformers import AutoModel


class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Score each query token's probability of being an entity start or end token."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        # Embed the query and the flattened support examples with the shared encoder.
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
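# Usage sketch (illustrative; tensor layouts follow forward() above): W_query
# and W_supports are tokenized batches, with the support batch also carrying
# "sizes", "start_token_id" and "end_token_id" entries that mark how many
# support examples belong to each query and which ids tag entity boundaries.
#
#   model = FSNERModel()
#   p_starts, p_ends = model(W_query, W_supports)   # per-token start/end scores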
| 362 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
lowerCamelCase : int = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase : List[Any] = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase : int = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase : Optional[int] = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase : str = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
lowerCamelCase : List[str] = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
lowerCamelCase : Union[str, Any] = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
lowerCamelCase : Dict = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCamelCase : Union[str, Any] = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCamelCase : Optional[Any] = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class _UpperCamelCase (a_ ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
snake_case_ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
snake_case_ = DPRContextEncoderTokenizer
class _UpperCamelCase (a_ ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
snake_case_ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
snake_case_ = DPRQuestionEncoderTokenizer
lowerCamelCase : Dict = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
lowerCamelCase : int = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
lowerCamelCase : Optional[int] = R'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(a_ )
class _UpperCamelCase :
def __call__( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
elif titles is None or texts is None:
__lowerCAmelCase = titles if texts is None else texts
return super().__call__(
__UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
__lowerCAmelCase = titles if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [titles]
__lowerCAmelCase = texts if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [texts]
__lowerCAmelCase = len(__UpperCamelCase )
__lowerCAmelCase = questions if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [questions] * n_passages
assert len(__UpperCamelCase ) == len(
__UpperCamelCase ), F"""There should be as many titles than texts but got {len(__UpperCamelCase )} titles and {len(__UpperCamelCase )} texts."""
__lowerCAmelCase = super().__call__(__UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )["input_ids"]
__lowerCAmelCase = super().__call__(__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )["input_ids"]
__lowerCAmelCase = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(__UpperCamelCase , __UpperCamelCase )
]
}
if return_attention_mask is not False:
__lowerCAmelCase = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
__lowerCAmelCase = attention_mask
return self.pad(__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase )
def __UpperCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1_6 , __UpperCamelCase = 6_4 , __UpperCamelCase = 4 , )-> List[DPRSpanPrediction]:
__lowerCAmelCase = reader_input["input_ids"]
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = reader_output[:3]
__lowerCAmelCase = len(__UpperCamelCase )
__lowerCAmelCase = sorted(range(__UpperCamelCase ) , reverse=__UpperCamelCase , key=relevance_logits.__getitem__ )
__lowerCAmelCase = []
for doc_id in sorted_docs:
__lowerCAmelCase = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
__lowerCAmelCase = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
__lowerCAmelCase = sequence_ids.index(self.pad_token_id )
else:
__lowerCAmelCase = len(__UpperCamelCase )
__lowerCAmelCase = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__UpperCamelCase , top_spans=__UpperCamelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__UpperCamelCase , start_index=__UpperCamelCase , end_index=__UpperCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(__UpperCamelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __UpperCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> List[DPRSpanPrediction]:
__lowerCAmelCase = []
for start_index, start_score in enumerate(__UpperCamelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
__lowerCAmelCase = sorted(__UpperCamelCase , key=lambda __UpperCamelCase : x[1] , reverse=__UpperCamelCase )
__lowerCAmelCase = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F"""Wrong span indices: [{start_index}:{end_index}]"""
__lowerCAmelCase = end_index - start_index + 1
assert length <= max_answer_length, F"""Span is too long: {length} > {max_answer_length}"""
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(__UpperCamelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(a_ )
class _UpperCamelCase (a_ , a_ ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = READER_PRETRAINED_VOCAB_FILES_MAP
snake_case_ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = READER_PRETRAINED_INIT_CONFIGURATION
snake_case_ = ["""input_ids""", """attention_mask"""]
snake_case_ = DPRReaderTokenizer
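# Usage sketch (illustrative, not part of the class above): with the public
# `transformers` DPR API the same reader tokenizer is driven as below; the
# checkpoint name is an example.
#
# from transformers import DPRReader, DPRReaderTokenizer
#
# tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
# model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
# encoded_inputs = tokenizer(
#     questions=["What is love?"],
#     titles=["Haddaway"],
#     texts=["'What Is Love' is a song recorded by the artist Haddaway."],
#     return_tensors="pt",
# )
# outputs = model(**encoded_inputs)
# predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
# print(predicted_spans[0].text)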
| 367 | 0 |
'''Keep the dummy objects under src/diffusers/utils in sync with the main __init__ (run from the repo root).'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = 'src/diffusers'
# Matches is_xxx_available()
_re_backend = re.compile(r'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
_re_single_line_import = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
DUMMY_CONSTANT = '\n{0} = None\n'
DUMMY_CLASS = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n'
DUMMY_FUNCTION = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)


def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    """Create the code for a dummy object of the given name."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite=False):
    """Check that the dummy files are up to date; with `overwrite`, rewrite them."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
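# For illustration, `create_dummy_object("UNet2DModel", '["torch"]')` renders the
# DUMMY_CLASS template above to (the class name is an example):
#
#     class UNet2DModel(metaclass=DummyObject):
#         _backends = ["torch"]
#
#         def __init__(self, *args, **kwargs):
#             requires_backends(self, ["torch"])
#
#         @classmethod
#         def from_config(cls, *args, **kwargs):
#             requires_backends(cls, ["torch"])
#
#         @classmethod
#         def from_pretrained(cls, *args, **kwargs):
#             requires_backends(cls, ["torch"])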
| 710 |
'''Project Euler Problem 92: count the starting numbers below ten million whose square-digit chain arrives at 89.'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
        number //= 10_0000
    return sum_of_digits_squared


# There are 2 chains made:
# one ends with 89, and declaring its member 58 first gives the least number of
# iterations for all the members to be checked;
# the other ends with 1 and has only one element, 1.
# So 58 and 1 are seeded at the start.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_00_00_00
CHAINS[0] = True  # the chain of 1 ends in 1
CHAINS[57] = False  # the chain of 58 ends in 89


def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    # a settled number also settles 10x, 100x, ... while they stay in range
    while number < 1000_0000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain


def solution(number: int = 1000_0000) -> int:
    """Return how many starting numbers below `number` produce a chain arriving at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution() = }')
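    # Spot checks of the square-digit step implemented by next_number:
    #   44 -> 4**2 + 4**2 = 32 -> 13 -> 10 -> 1                  (chain ends in 1)
    #   85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89 (chain ends in 89)
    assert next_number(44) == 32
    assert next_number(85) == 89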
| 9 | 0 |
'''Chinese remainder theorem for two coprime moduli, plus modular inverses via the extended Euclidean algorithm.'''
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a * x + b * y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Return the unique 0 <= x < n1 * n2 with x % n1 == r1 and x % n2 == r2."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return b such that (a * b) % n == 1."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same result as chinese_remainder_theorem, computed through modular inverses."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="""chinese_remainder_theorem""", verbose=True)
testmod(name="""chinese_remainder_theorem2""", verbose=True)
testmod(name="""invert_modulo""", verbose=True)
testmod(name="""extended_euclid""", verbose=True)
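    # Worked example: x = 31 is the unique solution modulo 35 with x % 5 == 1 and
    # x % 7 == 3, and 4 inverts 2 modulo 7 because 2 * 4 == 8 == 1 (mod 7).
    assert chinese_remainder_theorem(5, 1, 7, 3) == 31
    assert chinese_remainder_theorem2(5, 1, 7, 3) == 31
    assert invert_modulo(2, 7) == 4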
| 199 |
'''Evaluate a postfix (reverse Polish) expression given as space-separated tokens, printing an action table.'''
import operator as op


def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            # evaluate the 2 values popped from stack & push result to stack
            stack.append(str(opr[x](int(a), int(b))))
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
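    # Worked example: the postfix string "2 3 4 * +" encodes 2 + (3 * 4) = 14
    # (the tabular trace is printed as a side effect).
    assert solve("2 3 4 * +".split(" ")) == 14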
| 199 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ =logging.get_logger(__name__)
lowercase__ ={
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class UpperCamelCase__ ( snake_case__ ):
_SCREAMING_SNAKE_CASE : List[Any] = """realm"""
def __init__(self : Optional[int] , snake_case_ : str=3_0_5_2_2 , snake_case_ : Any=7_6_8 , snake_case_ : Optional[Any]=1_2_8 , snake_case_ : int=1_2 , snake_case_ : List[str]=1_2 , snake_case_ : Union[str, Any]=8 , snake_case_ : Optional[Any]=3_0_7_2 , snake_case_ : str="gelu_new" , snake_case_ : Optional[Any]=0.1 , snake_case_ : Optional[int]=0.1 , snake_case_ : Optional[Any]=5_1_2 , snake_case_ : Optional[int]=2 , snake_case_ : Dict=0.02 , snake_case_ : List[Any]=1E-12 , snake_case_ : Optional[int]=2_5_6 , snake_case_ : Optional[Any]=1_0 , snake_case_ : Any=1E-3 , snake_case_ : Tuple=5 , snake_case_ : Optional[int]=3_2_0 , snake_case_ : Dict=1_3_3_5_3_7_1_8 , snake_case_ : List[str]=5_0_0_0 , snake_case_ : Union[str, Any]=1 , snake_case_ : Optional[Any]=0 , snake_case_ : Tuple=2 , **snake_case_ : List[Any] , ):
super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
# Common config
__a : str = vocab_size
__a : List[Any] = max_position_embeddings
__a : List[Any] = hidden_size
__a : Optional[int] = retriever_proj_size
__a : Optional[int] = num_hidden_layers
__a : Optional[Any] = num_attention_heads
__a : Optional[int] = num_candidates
__a : int = intermediate_size
__a : Any = hidden_act
__a : Tuple = hidden_dropout_prob
__a : int = attention_probs_dropout_prob
__a : Optional[Any] = initializer_range
__a : Any = type_vocab_size
__a : str = layer_norm_eps
# Reader config
__a : Any = span_hidden_size
__a : Tuple = max_span_width
__a : Tuple = reader_layer_norm_eps
__a : List[str] = reader_beam_size
__a : Any = reader_seq_len
# Retrieval config
__a : List[Any] = num_block_records
__a : int = searcher_beam_size
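# Usage sketch (illustrative): in the public `transformers` API this configuration
# class is exposed as `RealmConfig`, so a default instance mirrors the
# google/realm-cc-news-pretrained-* checkpoints.
#
# from transformers import RealmConfig
#
# config = RealmConfig()
# print(config.hidden_size, config.retriever_proj_size, config.num_candidates)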
| 702 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase__ ={
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ =[
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ =[
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
lowercase__ =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
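# Sketch of what the lazy structure buys (illustrative): importing the package stays
# cheap because the torch-backed module is only loaded on first attribute access.
#
# from transformers.models import ctrl   # resolved through _LazyModule, torch not yet imported
# model_cls = ctrl.CTRLModel             # the heavy modeling module is imported here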
| 326 | 0 |
"""simple docstring"""
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Naive evaluation: sum of c * x**i over the coefficients."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Horner's rule: fold from the highest coefficient, one multiply-add per term."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)  # coefficients of x**0 .. x**4
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
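    # Hand check: with coefficients of x**0..x**4 as above, both evaluators give
    # 5.0 * 10**2 + 9.3 * 10**3 + 7.0 * 10**4 = 79800.0 (up to float rounding).
    import math

    assert math.isclose(evaluate_poly(poly, x), 79800.0)
    assert math.isclose(horner(poly, x), 79800.0)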
| 4 |
'''Binomial distribution: probability of exactly k successes in n independent Bernoulli trials.'''
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return P(X = successes) for X ~ Binomial(trials, prob)."""
    if successes > trials:
        raise ValueError('successes must be lower or equal to trials')
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers')
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError('the function is defined for non-negative integers')
    if not 0 < prob < 1:
        raise ValueError('prob must be in the range (0, 1)')
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
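    # Hand check: P(X = 2) for X ~ Binomial(4, 0.75) is
    # C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375.
    assert abs(binomial_distribution(2, 4, 0.75) - 0.2109375) < 1e-12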
| 430 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ["""PLBartTokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
"""PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PLBartForCausalLM""",
"""PLBartForConditionalGeneration""",
"""PLBartForSequenceClassification""",
"""PLBartModel""",
"""PLBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 721 |
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"""
),
}
class _UpperCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
snake_case_ = 'xlm-prophetnet'
snake_case_ = ['past_key_values']
snake_case_ = {
'num_attention_heads': 'num_encoder_attention_heads',
}
def __init__( self : Tuple , snake_case : Optional[float] = 0.1 , snake_case : Optional[Union[str, Callable]] = "gelu" , snake_case : Optional[int] = 3_0522 , snake_case : Optional[int] = 1024 , snake_case : Optional[int] = 4096 , snake_case : Optional[int] = 12 , snake_case : Optional[int] = 16 , snake_case : Optional[int] = 4096 , snake_case : Optional[int] = 12 , snake_case : Optional[int] = 16 , snake_case : Optional[float] = 0.1 , snake_case : Optional[float] = 0.1 , snake_case : Optional[int] = 512 , snake_case : Optional[float] = 0.02 , snake_case : Optional[bool] = True , snake_case : Optional[bool] = True , snake_case : Optional[int] = 0 , snake_case : Optional[int] = 2 , snake_case : Optional[int] = 32 , snake_case : Optional[int] = 128 , snake_case : Optional[bool] = False , snake_case : Optional[float] = 0.0 , snake_case : Optional[bool] = True , snake_case : Optional[int] = 0 , snake_case : Optional[int] = 1 , snake_case : Optional[int] = 2 , **snake_case : List[str] , ) -> str:
'''simple docstring'''
__magic_name__ : List[str] = vocab_size
__magic_name__ : Optional[int] = hidden_size
__magic_name__ : Any = encoder_ffn_dim
__magic_name__ : str = num_encoder_layers
__magic_name__ : List[str] = num_encoder_attention_heads
__magic_name__ : Dict = decoder_ffn_dim
__magic_name__ : int = num_decoder_layers
__magic_name__ : str = num_decoder_attention_heads
__magic_name__ : Tuple = max_position_embeddings
__magic_name__ : Optional[int] = init_std # Normal(0, this parameter)
__magic_name__ : Optional[int] = activation_function
# parameters for xlmprophetnet
__magic_name__ : int = ngram
__magic_name__ : List[Any] = num_buckets
__magic_name__ : int = relative_max_distance
__magic_name__ : List[str] = disable_ngram_loss
__magic_name__ : Union[str, Any] = eps
# 3 Types of Dropout
__magic_name__ : Tuple = attention_dropout
__magic_name__ : List[Any] = activation_dropout
__magic_name__ : Optional[int] = dropout
__magic_name__ : Dict = use_cache
super().__init__(
pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , is_encoder_decoder=snake_case , add_cross_attention=snake_case , decoder_start_token_id=snake_case , **snake_case , )
@property
def _UpperCAmelCase ( self : Union[str, Any] ) -> int:
'''simple docstring'''
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def _UpperCAmelCase ( self : List[Any] , snake_case : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'''
''' `num_decoder_layers`.''' )
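# Usage sketch (illustrative): in the public `transformers` API this class is
# `XLMProphetNetConfig`, and `num_hidden_layers` is derived and read-only.
#
# from transformers import XLMProphetNetConfig
#
# config = XLMProphetNetConfig(num_encoder_layers=6, num_decoder_layers=6)
# assert config.num_hidden_layers == 12   # assigning to it raises NotImplementedError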
| 147 | 0 |
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
a = logging.get_logger(__name__)
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Tuple = ['''pixel_values''']
def __init__( self : int , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[int, float] = 1 / 255 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , _UpperCAmelCase : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **_UpperCAmelCase : Any , ):
super().__init__(**_UpperCAmelCase )
_A = size if size is not None else {'shortest_edge': 224}
_A = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
_A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
_A = get_size_dict(_UpperCAmelCase , param_name='crop_size' )
_A = do_resize
_A = size
_A = resample
_A = do_center_crop
_A = crop_size
_A = do_rescale
_A = rescale_factor
_A = do_normalize
_A = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_A = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Dict , ):
_A = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
_A = int((256 / 224) * size['shortest_edge'] )
_A = get_resize_output_image_size(_UpperCAmelCase , size=_UpperCAmelCase , default_to_square=_UpperCAmelCase )
_A = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' )
return resize(
_UpperCAmelCase , size=(size_dict['height'], size_dict['width']) , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Optional[Any] , ):
_A = get_size_dict(_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size dict must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(_UpperCAmelCase , size=(size['height'], size['width']) , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[int, float] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Optional[int] , ):
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : int , ):
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : ImageInput , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Dict[str, int]] = None , _UpperCAmelCase : PILImageResampling = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Dict[str, int]] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[float] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[float, Iterable[float]]] = None , _UpperCAmelCase : Optional[Union[float, Iterable[float]]] = None , _UpperCAmelCase : Optional[TensorType] = None , _UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCAmelCase : Dict , ):
_A = do_resize if do_resize is not None else self.do_resize
_A = resample if resample is not None else self.resample
_A = do_center_crop if do_center_crop is not None else self.do_center_crop
_A = do_rescale if do_rescale is not None else self.do_rescale
_A = rescale_factor if rescale_factor is not None else self.rescale_factor
_A = do_normalize if do_normalize is not None else self.do_normalize
_A = image_mean if image_mean is not None else self.image_mean
_A = image_std if image_std is not None else self.image_std
_A = size if size is not None else self.size
_A = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
_A = crop_size if crop_size is not None else self.crop_size
_A = get_size_dict(_UpperCAmelCase , param_name='crop_size' )
_A = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
_A = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
_A = [self.resize(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) for image in images]
if do_center_crop:
_A = [self.center_crop(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
if do_rescale:
_A = [self.rescale(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
if do_normalize:
_A = [self.normalize(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) for image in images]
_A = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
_A = {'pixel_values': images}
return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
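# Usage sketch (illustrative; the relative imports above only resolve inside the
# `transformers` package). With defaults the processor resizes the short edge,
# center-crops to 224x224, rescales by 1/255 and normalizes with ImageNet stats;
# `preprocess` is the upstream name of the final method above.
#
# from PIL import Image
#
# processor = lowercase_()
# image = Image.new("RGB", (640, 480))
# batch = processor.preprocess(image, return_tensors="np")
# print(batch["pixel_values"].shape)   # expected: (1, 3, 224, 224)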
| 7 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = '''wav2vec2'''
def __init__( self ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=768 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=3_072 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE="group" ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 512, 512, 512) ,_SCREAMING_SNAKE_CASE=(5, 2, 2, 2, 2, 2, 2) ,_SCREAMING_SNAKE_CASE=(10, 3, 3, 3, 3, 2, 2) ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=128 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=0.05 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=320 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=100 ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE="sum" ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 1_500) ,_SCREAMING_SNAKE_CASE=(5, 3, 3, 1, 1) ,_SCREAMING_SNAKE_CASE=(1, 2, 3, 1, 1) ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,**_SCREAMING_SNAKE_CASE ,) -> Optional[int]:
super().__init__(**_SCREAMING_SNAKE_CASE ,pad_token_id=_SCREAMING_SNAKE_CASE ,bos_token_id=_SCREAMING_SNAKE_CASE ,eos_token_id=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = hidden_size
UpperCAmelCase_ : Tuple = feat_extract_norm
UpperCAmelCase_ : List[Any] = feat_extract_activation
UpperCAmelCase_ : str = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = conv_bias
UpperCAmelCase_ : str = num_conv_pos_embeddings
UpperCAmelCase_ : Any = num_conv_pos_embedding_groups
UpperCAmelCase_ : Tuple = len(self.conv_dim )
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : Dict = intermediate_size
UpperCAmelCase_ : Any = hidden_act
UpperCAmelCase_ : Any = num_attention_heads
UpperCAmelCase_ : str = hidden_dropout
UpperCAmelCase_ : int = attention_dropout
UpperCAmelCase_ : Tuple = activation_dropout
UpperCAmelCase_ : List[str] = feat_proj_dropout
UpperCAmelCase_ : int = final_dropout
UpperCAmelCase_ : Union[str, Any] = layerdrop
UpperCAmelCase_ : Optional[Any] = layer_norm_eps
UpperCAmelCase_ : str = initializer_range
UpperCAmelCase_ : List[str] = vocab_size
UpperCAmelCase_ : Optional[int] = do_stable_layer_norm
UpperCAmelCase_ : Optional[int] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ : Optional[int] = apply_spec_augment
UpperCAmelCase_ : Tuple = mask_time_prob
UpperCAmelCase_ : Optional[Any] = mask_time_length
UpperCAmelCase_ : Union[str, Any] = mask_time_min_masks
UpperCAmelCase_ : Optional[Any] = mask_feature_prob
UpperCAmelCase_ : str = mask_feature_length
UpperCAmelCase_ : Dict = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCAmelCase_ : Union[str, Any] = num_codevectors_per_group
UpperCAmelCase_ : Any = num_codevector_groups
UpperCAmelCase_ : Union[str, Any] = contrastive_logits_temperature
UpperCAmelCase_ : List[str] = feat_quantizer_dropout
UpperCAmelCase_ : Dict = num_negatives
UpperCAmelCase_ : List[str] = codevector_dim
UpperCAmelCase_ : List[str] = proj_codevector_dim
UpperCAmelCase_ : str = diversity_loss_weight
# ctc loss
UpperCAmelCase_ : List[Any] = ctc_loss_reduction
UpperCAmelCase_ : List[str] = ctc_zero_infinity
# adapter
UpperCAmelCase_ : Optional[Any] = add_adapter
UpperCAmelCase_ : Any = adapter_kernel_size
UpperCAmelCase_ : Optional[int] = adapter_stride
UpperCAmelCase_ : List[Any] = num_adapter_layers
UpperCAmelCase_ : Optional[Any] = output_hidden_size or hidden_size
UpperCAmelCase_ : Optional[int] = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase_ : List[str] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase_ : List[str] = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = xvector_output_dim
@property
def a__ ( self ) -> Any:
return functools.reduce(operator.mul ,self.conv_stride ,1 )
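# Note (hedged): the closing property corresponds to `inputs_to_logits_ratio` in the
# public Wav2Vec2Config API; with the default conv strides (5, 2, 2, 2, 2, 2, 2) each
# logit frame spans 5 * 2**6 = 320 input samples.
#
# from transformers import Wav2Vec2Config
#
# assert Wav2Vec2Config().inputs_to_logits_ratio == 320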
| 30 | 0 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
A : Optional[int] = logging.getLogger(__name__)
@dataclass
class lowerCamelCase :
"""simple docstring"""
lowerCamelCase__ = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
lowerCamelCase__ = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
lowerCamelCase__ = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
lowerCamelCase__ = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
lowerCamelCase__ = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
lowerCamelCase__ = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of prediction examples to this '''
'''value if set.'''
)
} , )
@dataclass
class lowerCamelCase :
"""simple docstring"""
lowerCamelCase__ = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
lowerCamelCase__ = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Evaluation language. Also train language if `train_language` is set to None.'''} )
lowerCamelCase__ = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Train language if it is different from the evaluation language.'''} )
lowerCamelCase__ = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
lowerCamelCase__ = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
lowerCamelCase__ = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
lowerCamelCase__ = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'''} , )
lowerCamelCase__ = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
lowerCamelCase__ = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
lowerCamelCase__ = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
lowerCamelCase__ = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def a__ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
SCREAMING_SNAKE_CASE_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_xnli" , __UpperCamelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = training_args.get_process_log_level()
logger.setLevel(__UpperCamelCase )
datasets.utils.logging.set_verbosity(__UpperCamelCase )
transformers.utils.logging.set_verbosity(__UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
SCREAMING_SNAKE_CASE_ = load_dataset(
"xnli" , model_args.language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
SCREAMING_SNAKE_CASE_ = load_dataset(
"xnli" , model_args.train_language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE_ = train_dataset.features["label"].names
if training_args.do_eval:
SCREAMING_SNAKE_CASE_ = load_dataset(
"xnli" , model_args.language , split="validation" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE_ = eval_dataset.features["label"].names
if training_args.do_predict:
SCREAMING_SNAKE_CASE_ = load_dataset(
"xnli" , model_args.language , split="test" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE_ = predict_dataset.features["label"].names
# Labels
SCREAMING_SNAKE_CASE_ = len(__UpperCamelCase )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__UpperCamelCase , idalabel={str(__UpperCamelCase ): label for i, label in enumerate(__UpperCamelCase )} , labelaid={label: i for i, label in enumerate(__UpperCamelCase )} , finetuning_task="xnli" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE_ = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
SCREAMING_SNAKE_CASE_ = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
SCREAMING_SNAKE_CASE_ = False
def preprocess_function(__UpperCamelCase ):
# Tokenize the texts
return tokenizer(
examples["premise"] , examples["hypothesis"] , padding=__UpperCamelCase , max_length=data_args.max_seq_length , truncation=__UpperCamelCase , )
if training_args.do_train:
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE_ = min(len(__UpperCamelCase ) , data_args.max_train_samples )
SCREAMING_SNAKE_CASE_ = train_dataset.select(range(__UpperCamelCase ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
SCREAMING_SNAKE_CASE_ = train_dataset.map(
__UpperCamelCase , batched=__UpperCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on train dataset" , )
# Log a few random samples from the training set:
for index in random.sample(range(len(__UpperCamelCase ) ) , 3 ):
logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''' )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE_ = min(len(__UpperCamelCase ) , data_args.max_eval_samples )
SCREAMING_SNAKE_CASE_ = eval_dataset.select(range(__UpperCamelCase ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
SCREAMING_SNAKE_CASE_ = eval_dataset.map(
__UpperCamelCase , batched=__UpperCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on validation dataset" , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
SCREAMING_SNAKE_CASE_ = min(len(__UpperCamelCase ) , data_args.max_predict_samples )
SCREAMING_SNAKE_CASE_ = predict_dataset.select(range(__UpperCamelCase ) )
with training_args.main_process_first(desc="prediction dataset map pre-processing" ):
SCREAMING_SNAKE_CASE_ = predict_dataset.map(
__UpperCamelCase , batched=__UpperCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on prediction dataset" , )
# Get the metric function
SCREAMING_SNAKE_CASE_ = evaluate.load("xnli" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = p.predictions[0] if isinstance(p.predictions , __UpperCamelCase ) else p.predictions
SCREAMING_SNAKE_CASE_ = np.argmax(__UpperCamelCase , axis=1 )
return metric.compute(predictions=__UpperCamelCase , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
SCREAMING_SNAKE_CASE_ = default_data_collator
elif training_args.fpaa:
SCREAMING_SNAKE_CASE_ = DataCollatorWithPadding(__UpperCamelCase , pad_to_multiple_of=8 )
else:
SCREAMING_SNAKE_CASE_ = None
# Initialize our Trainer
SCREAMING_SNAKE_CASE_ = Trainer(
model=__UpperCamelCase , args=__UpperCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__UpperCamelCase , tokenizer=__UpperCamelCase , data_collator=__UpperCamelCase , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE_ = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE_ = last_checkpoint
SCREAMING_SNAKE_CASE_ = trainer.train(resume_from_checkpoint=__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = train_result.metrics
SCREAMING_SNAKE_CASE_ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCamelCase )
)
SCREAMING_SNAKE_CASE_ = min(__UpperCamelCase , len(__UpperCamelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train" , __UpperCamelCase )
trainer.save_metrics("train" , __UpperCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
SCREAMING_SNAKE_CASE_ = trainer.evaluate(eval_dataset=__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = min(__UpperCamelCase , len(__UpperCamelCase ) )
trainer.log_metrics("eval" , __UpperCamelCase )
trainer.save_metrics("eval" , __UpperCamelCase )
# Prediction
if training_args.do_predict:
logger.info("*** Predict ***" )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = trainer.predict(__UpperCamelCase , metric_key_prefix="predict" )
SCREAMING_SNAKE_CASE_ = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(__UpperCamelCase )
)
SCREAMING_SNAKE_CASE_ = min(__UpperCamelCase , len(__UpperCamelCase ) )
trainer.log_metrics("predict" , __UpperCamelCase )
trainer.save_metrics("predict" , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = np.argmax(__UpperCamelCase , axis=1 )
SCREAMING_SNAKE_CASE_ = os.path.join(training_args.output_dir , "predictions.txt" )
if trainer.is_world_process_zero():
with open(__UpperCamelCase , "w" ) as writer:
writer.write("index\tprediction\n" )
for index, item in enumerate(__UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = label_list[item]
writer.write(F'''{index}\t{item}\n''' )
if __name__ == "__main__":
main()
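# Illustrative launch command for the script above; flag names come from the
# dataclasses and TrainingArguments used earlier, checkpoint/language values are
# examples only:
#
#   python run_xnli.py \
#     --model_name_or_path bert-base-multilingual-cased \
#     --language de \
#     --train_language en \
#     --do_train --do_eval \
#     --per_device_train_batch_size 32 \
#     --learning_rate 5e-5 \
#     --num_train_epochs 2 \
#     --output_dir /tmp/debug_xnli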
| 356 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class lowerCamelCase (yaml.SafeLoader ):
"""simple docstring"""
def __A ( self : str , __magic_name__ : str ) -> str:
SCREAMING_SNAKE_CASE_ = [self.constructed_objects[key_node] for key_node, _ in node.value]
SCREAMING_SNAKE_CASE_ = [tuple(__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else key for key in keys]
SCREAMING_SNAKE_CASE_ = Counter(__magic_name__ )
SCREAMING_SNAKE_CASE_ = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F'''Got duplicate yaml keys: {duplicate_keys}''' )
def __A ( self : int , __magic_name__ : int , __magic_name__ : List[str]=False ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = super().construct_mapping(__magic_name__ , deep=__magic_name__ )
self._check_no_duplicates_on_constructed_node(__magic_name__ )
return mapping
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
SCREAMING_SNAKE_CASE_ = full_content[1:].index("---" ) + 1
SCREAMING_SNAKE_CASE_ = "\n".join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(__UpperCamelCase )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = {'''train_eval_index'''} # train-eval-index in the YAML metadata
@classmethod
def __A ( cls : Dict , __magic_name__ : Path ) -> "DatasetMetadata":
with open(__magic_name__ , encoding="utf-8" ) as readme_file:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(__magic_name__ )
else:
return cls()
def __A ( self : str , __magic_name__ : Path ) -> List[str]:
if path.exists():
with open(__magic_name__ , encoding="utf-8" ) as readme_file:
SCREAMING_SNAKE_CASE_ = readme_file.read()
else:
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = self._to_readme(__magic_name__ )
with open(__magic_name__ , "w" , encoding="utf-8" ) as readme_file:
readme_file.write(__magic_name__ )
def __A ( self : Any , __magic_name__ : Optional[str] = None ) -> str:
if readme_content is not None:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = _split_yaml_from_readme(__magic_name__ )
SCREAMING_SNAKE_CASE_ = "---\n" + self.to_yaml_string() + "---\n" + content
else:
SCREAMING_SNAKE_CASE_ = "---\n" + self.to_yaml_string() + "---\n"
return full_content
@classmethod
def __A ( cls : List[Any] , __magic_name__ : str ) -> "DatasetMetadata":
SCREAMING_SNAKE_CASE_ = yaml.load(__magic_name__ , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
SCREAMING_SNAKE_CASE_ = {
(key.replace("-" , "_" ) if key.replace("-" , "_" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**__magic_name__ )
def __A ( self : Optional[Any] ) -> str:
return yaml.safe_dump(
{
(key.replace("_" , "-" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=__magic_name__ , allow_unicode=__magic_name__ , encoding="utf-8" , ).decode("utf-8" )
A : List[Any] = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
A : Optional[Any] = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
ap.add_argument("readme_filepath")
A : Union[str, Any] = ap.parse_args()
A : Union[str, Any] = Path(args.readme_filepath)
A : List[Any] = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
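# Minimal round-trip sketch (illustrative): in `datasets` proper the metadata class
# above is `DatasetMetadata`; parsing and re-writing a README's YAML block looks like:
#
# from pathlib import Path
#
# metadata = DatasetMetadata.from_readme(Path("README.md"))   # parse the YAML block
# metadata.to_readme(Path("README.md"))                       # write it back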
| 356 | 1 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
SCREAMING_SNAKE_CASE_ = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
def rename_key(state_dict, old, new):
    """Pop `old` from the state dict and re-insert its value under `new`."""
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    """Rename the timm-style backbone keys to the HuggingFace conv encoder layout."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    """Split each fused attention input projection into separate q/k/v projections."""
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
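# Illustration of the slicing convention used above (not part of the conversion
# itself; toy dimension d=4 stands in for the real hidden size of 256): a fused
# `in_proj_weight` of shape (3*d, d) stores the query, key and value projections
# stacked row-wise, so the three slices reproduce the fused projection exactly.
def _demo_qkv_split(d=4):
    fused_w = torch.randn(3 * d, d)
    fused_b = torch.randn(3 * d)
    q_w, k_w, v_w = fused_w[:d, :], fused_w[d : 2 * d, :], fused_w[-d:, :]
    q_b, k_b, v_b = fused_b[:d], fused_b[d : 2 * d], fused_b[-d:]
    x = torch.randn(2, d)  # two token embeddings
    fused_out = x @ fused_w.T + fused_b
    split_out = torch.cat([x @ q_w.T + q_b, x @ k_w.T + k_b, x @ v_w.T + v_b], dim=-1)
    assert torch.allclose(fused_out, split_out)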
def resize(image, checkpoint_url):
    """Resize so the longest side is 800 (detection) or 1000 (structure recognition)."""
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image


def normalize(image):
    """Convert a PIL image to a tensor and apply ImageNet mean/std normalization."""
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
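# Quick sanity check of the rescaling logic above (illustrative dimensions only;
# the checkpoint filename is just used to trigger the "detection" branch):
def _demo_resize():
    img = Image.new("RGB", (1200, 900))  # width x height
    resized = resize(img, "pubtables1m_detection_detr_r18.pth")
    # detection checkpoints target a longest side of 800: scale = 800 / 1200
    assert resized.size == (800, 600)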
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Copy/paste/tweak the original Table Transformer weights into the HF model."""
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
type=str,
choices=[
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
],
help='URL of the Table Transformer checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 34 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    """Map an original ViT MAE parameter name to its HuggingFace equivalent."""
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name
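# Two illustrative mappings produced by the rules above (the input keys are
# hypothetical examples in the original checkpoint's naming scheme):
def _demo_rename_key():
    assert rename_key("blocks.0.attn.proj.weight") == "vit.encoder.layer.0.attention.output.dense.weight"
    assert rename_key("decoder_blocks.2.mlp.fc1.bias") == "decoder.decoder_layers.2.intermediate.dense.bias"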
def convert_state_dict(orig_state_dict, config):
    """Rename all keys and split the fused qkv projections of every attention block."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    image_processor = ViTMAEImageProcessor(size=config.image_size)

    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 34 | 1 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
| 715 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
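    # For the default tester config (image_size=224, embed_dims=[48, 56, 112, 220]),
    # the shape formula checked above yields:
    #   i=0,1: (48, 56, 56)    i=2,3: (56, 28, 28)
    #   i=4,5: (112, 14, 14)   i=6,7: (220, 7, 7)
    # i.e. spatial resolution halves after every two blocks while channels grow.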
    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self ) -> Any:
'''simple docstring'''
pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 653 | 0 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    """Build a dataset file URL on the Hugging Face Hub, working around old hfh versions."""
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
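# Illustration of the version gate above: `.release` drops pre-release segments,
# so a 0.11.0 dev build already counts as having the url-encoding fix.
def _demo_version_gate():
    assert version.parse("0.10.1").release < version.parse("0.11.0").release
    assert not (version.parse("0.11.0.dev0").release < version.parse("0.11.0").release)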
| 268 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 byte values to printable unicode strings, so that every
    byte is representable as a visible BPE symbol (avoiding whitespace/control characters).
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
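# A small demonstration of the table built above: all 256 byte values map to
# printable characters, printable ASCII maps to itself, and the map inverts cleanly.
def _demo_bytes_to_unicode():
    byte_encoder = bytes_to_unicode()
    assert len(byte_encoder) == 256
    assert byte_encoder[ord("A")] == "A"  # printable bytes map to themselves
    assert byte_encoder[ord(" ")] == "\u0120"  # space becomes 'Ġ'
    byte_decoder = {v: k for k, v in byte_encoder.items()}
    assert all(byte_decoder[byte_encoder[b]] == b for b in range(256))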
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
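# For example, get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")} -- the set of
# adjacent symbol bigrams that the BPE merge loop below repeatedly collapses.
def _demo_get_pairs():
    assert get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}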
class LEDTokenizer(PreTrainedTokenizer):
    """
    Constructs a LED tokenizer, which uses GPT-2-style byte-level Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # merge the lowest-ranked (most frequent) adjacent pair first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
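    # Example trace of the loop above with toy ranks {("l", "o"): 0, ("lo", "w"): 1}
    # and token "low":
    #   ("l", "o", "w") --merge ("l","o")--> ("lo", "w") --merge ("lo","w")--> ("low",)
    # so bpe("low") would cache and return the single symbol "low".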
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
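    # With LED's usual special token ids (<s> = 0, </s> = 2) and hypothetical
    # sequences A = [100, 101], B = [200], this assembles the BART-style layout:
    #   single: [0, 100, 101, 2]
    #   pair:   [0, 100, 101, 2, 2, 200, 2]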
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad(
        self,
        encoded_inputs,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
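# Illustration of the `_pad` rule above: `global_attention_mask` is padded with -1
# (meaning "local attention", not "don't attend") on the same side as the other inputs.
def _demo_global_attention_padding():
    global_attention_mask = [1, 0, 0]
    difference = 5 - len(global_attention_mask)  # pad from length 3 to length 5
    assert global_attention_mask + [-1] * difference == [1, 0, 0, -1, -1]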
| 268 | 1 |
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Check whether the given string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$")
    return bool(re.search(pattern, phone))
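# A few illustrative checks (made-up numbers that exercise the pattern):
def _demo_is_sri_lankan_phone_number():
    assert is_sri_lankan_phone_number("+94773283048")
    assert is_sri_lankan_phone_number("0718382399")
    assert not is_sri_lankan_phone_number("0912343221")  # "09..." is not a valid mobile prefix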
if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
| 716 |
def greatest_common_divisor(x: int, y: int) -> int:
    """Euclidean algorithm: gcd(x, y) = gcd(y, x mod y)."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Least common multiple via the gcd identity."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Return the smallest positive number evenly divisible by all numbers from 1 to n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
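# The identity behind `lcm` above -- lcm(x, y) = x*y / gcd(x, y) -- on small numbers,
# plus the classic n=10 answer for this problem:
def _demo_solution():
    assert greatest_common_divisor(12, 8) == 4
    assert lcm(12, 8) == 24
    assert solution(10) == 2520  # smallest number evenly divisible by 1..10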
if __name__ == "__main__":
print(f"{solution() = }")
| 561 | 0 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 398 |
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """Depth-first traversal that marks each undirected edge as used exactly once."""
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path
def check_circuit_or_path(graph, max_node):
    """
    Classify the graph: 1 = Euler circuit (all degrees even), 2 = Euler path
    (exactly two odd-degree vertices), 3 = neither. Also returns an odd vertex if any.
    """
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
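# The return codes encode the classic Euler conditions: a connected graph has an
# Euler circuit iff every vertex has even degree, and an Euler path iff exactly two
# vertices have odd degree (the path must start at one of them). For example, a
# triangle has all-even degrees and therefore a circuit:
def _demo_check_circuit_or_path():
    triangle = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    assert check_circuit_or_path(triangle, 10) == (1, -1)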
def check_euler(graph, max_node):
    """Check whether the graph has an Euler path/circuit and print one traversal."""
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)
def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: [],
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
| 398 | 1 |
def is_isogram(string: str) -> bool:
    """
    An isogram is a word in which no letter is repeated.

    Raises ValueError if the string contains non-alphabetic characters.
    """
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
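# Examples (case-insensitive, so a repeated letter in any case disqualifies):
def _demo_is_isogram():
    assert is_isogram("Uncopyrightable")
    assert not is_isogram("allowance")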
if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 15 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a single random PIL image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predictions = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predictions)
        decoded_tok = tokenizer.batch_decode(predictions)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
| 15 | 1 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
def snake_case ( self ):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def snake_case ( self ):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def snake_case ( self ):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Dict = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] ,[['[UNK]'], [], ['[UNK]']] )
self.assertListEqual(
[rust_tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] ,[['[UNK]'], [], ['[UNK]']] )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.tokenizer_class.from_pretrained('bert-base-uncased' )
text = tokenizer.encode('sequence builders' ,add_special_tokens=False )
text_a = tokenizer.encode('multi-sequence build' ,add_special_tokens=False )
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
encoded_pair = tokenizer.build_inputs_with_special_tokens(text ,text_a )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : int = self.rust_tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = F'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer_r.encode_plus(
snake_case__ ,return_attention_mask=snake_case__ ,return_token_type_ids=snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Any = tokenizer_r.do_lower_case if hasattr(snake_case__ ,'do_lower_case' ) else False
SCREAMING_SNAKE_CASE_ : Any = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] ,tokens['offset_mapping'] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = ['的', '人', '有']
SCREAMING_SNAKE_CASE_ : List[Any] = ''.join(snake_case__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : str = self.rust_tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_p.encode(snake_case__ ,add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer_r.encode(snake_case__ ,add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer_r.convert_ids_to_tokens(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_p.convert_ids_to_tokens(snake_case__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(snake_case__ ,snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : Dict = self.rust_tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = self.tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_r.encode(snake_case__ ,add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_p.encode(snake_case__ ,add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE_ : int = tokenizer_r.convert_ids_to_tokens(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer_p.convert_ids_to_tokens(snake_case__ )
# it is expected that only the first Chinese character is not preceded by "##".
SCREAMING_SNAKE_CASE_ : List[Any] = [
F'##{token}' if idx != 0 else token for idx, token in enumerate(snake_case__ )
]
self.assertListEqual(snake_case__ ,snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
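# The WordpieceTokenizer expectations above follow greedy longest-match-first
# matching: scan each word from the left, take the longest vocabulary prefix
# (continuation pieces carry a "##" prefix), and fall back to "[UNK]" when a
# span cannot be matched. A minimal standalone sketch of that rule follows;
# it illustrates the algorithm, not the library's exact implementation.
def wordpiece_tokenize(word: str, vocab: set, unk_token: str = "[UNK]") -> list:
    pieces = []
    start = 0
    while start < len(word):
        end = len(word)
        piece = None
        while start < end:
            candidate = word[start:end]
            if start > 0:
                candidate = "##" + candidate  # continuation pieces are prefixed
            if candidate in vocab:
                piece = candidate  # longest match wins
                break
            end -= 1
        if piece is None:
            return [unk_token]  # one unmatchable span makes the whole word unknown
        pieces.append(piece)
        start = end
    return pieces

# Mirrors the expectations exercised by the test above:
assert wordpiece_tokenize("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]
assert wordpiece_tokenize("unwantedX", {"un", "##want", "##ed"}) == ["[UNK]"]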
| 105 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
A_ = get_logger(__name__)
def UpperCAmelCase ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase=0 )-> List[str]:
'''simple docstring'''
os.makedirs(UpperCAmelCase ,exist_ok=UpperCAmelCase )
with FSDP.state_dict_type(
UpperCAmelCase ,fsdp_plugin.state_dict_type ,fsdp_plugin.state_dict_config ,fsdp_plugin.optim_state_dict_config ):
SCREAMING_SNAKE_CASE_ = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
SCREAMING_SNAKE_CASE_ = f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin'''
SCREAMING_SNAKE_CASE_ = os.path.join(UpperCAmelCase ,UpperCAmelCase )
if accelerator.process_index == 0:
logger.info(f'''Saving model to {output_model_file}''' )
torch.save(UpperCAmelCase ,UpperCAmelCase )
logger.info(f'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
SCREAMING_SNAKE_CASE_ = (
f'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
SCREAMING_SNAKE_CASE_ = os.path.join(UpperCAmelCase ,UpperCAmelCase )
logger.info(f'''Saving model to {output_model_file}''' )
torch.save(UpperCAmelCase ,UpperCAmelCase )
logger.info(f'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
SCREAMING_SNAKE_CASE_ = os.path.join(UpperCAmelCase ,f'''{MODEL_NAME}_{model_index}''' )
os.makedirs(UpperCAmelCase ,exist_ok=UpperCAmelCase )
logger.info(f'''Saving model to {ckpt_dir}''' )
SCREAMING_SNAKE_CASE_ = {'''model''': state_dict}
dist_cp.save_state_dict(
state_dict=UpperCAmelCase ,storage_writer=dist_cp.FileSystemWriter(UpperCAmelCase ) ,planner=DefaultSavePlanner() ,)
logger.info(f'''Model saved to {ckpt_dir}''' )
def UpperCAmelCase ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase=0 )-> List[str]:
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
UpperCAmelCase ,fsdp_plugin.state_dict_type ,fsdp_plugin.state_dict_config ,fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(UpperCAmelCase ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'''Set the `sync_module_states` flag to `True` so that model states are synced across processes when '''
'''initializing FSDP object''' )
return
SCREAMING_SNAKE_CASE_ = f'''{MODEL_NAME}.bin''' if model_index == 0 else f'''{MODEL_NAME}_{model_index}.bin'''
SCREAMING_SNAKE_CASE_ = os.path.join(UpperCAmelCase ,UpperCAmelCase )
logger.info(f'''Loading model from {input_model_file}''' )
SCREAMING_SNAKE_CASE_ = torch.load(UpperCAmelCase )
logger.info(f'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
SCREAMING_SNAKE_CASE_ = (
f'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else f'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
SCREAMING_SNAKE_CASE_ = os.path.join(UpperCAmelCase ,UpperCAmelCase )
logger.info(f'''Loading model from {input_model_file}''' )
SCREAMING_SNAKE_CASE_ = torch.load(UpperCAmelCase )
logger.info(f'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
SCREAMING_SNAKE_CASE_ = (
os.path.join(UpperCAmelCase ,f'''{MODEL_NAME}_{model_index}''' )
if f'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(f'''Loading model from {ckpt_dir}''' )
SCREAMING_SNAKE_CASE_ = {'''model''': model.state_dict()}
dist_cp.load_state_dict(
state_dict=UpperCAmelCase ,storage_reader=dist_cp.FileSystemReader(UpperCAmelCase ) ,planner=DefaultLoadPlanner() ,)
SCREAMING_SNAKE_CASE_ = state_dict['''model''']
logger.info(f'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase=0 )-> int:
'''simple docstring'''
os.makedirs(UpperCAmelCase ,exist_ok=UpperCAmelCase )
with FSDP.state_dict_type(
UpperCAmelCase ,fsdp_plugin.state_dict_type ,fsdp_plugin.state_dict_config ,fsdp_plugin.optim_state_dict_config ):
SCREAMING_SNAKE_CASE_ = FSDP.optim_state_dict(UpperCAmelCase ,UpperCAmelCase )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
SCREAMING_SNAKE_CASE_ = (
f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
SCREAMING_SNAKE_CASE_ = os.path.join(UpperCAmelCase ,UpperCAmelCase )
logger.info(f'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(UpperCAmelCase ,UpperCAmelCase )
logger.info(f'''Optimizer state saved in {output_optimizer_file}''' )
else:
SCREAMING_SNAKE_CASE_ = os.path.join(UpperCAmelCase ,f'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(UpperCAmelCase ,exist_ok=UpperCAmelCase )
logger.info(f'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={'''optimizer''': optim_state} ,storage_writer=dist_cp.FileSystemWriter(UpperCAmelCase ) ,planner=DefaultSavePlanner() ,)
logger.info(f'''Optimizer state saved in {ckpt_dir}''' )
def UpperCAmelCase ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase=0 )-> Any:
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
UpperCAmelCase ,fsdp_plugin.state_dict_type ,fsdp_plugin.state_dict_config ,fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
SCREAMING_SNAKE_CASE_ = None
# below check should work but currently it isn't working (mostly a PyTorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
SCREAMING_SNAKE_CASE_ = (
f'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else f'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
SCREAMING_SNAKE_CASE_ = os.path.join(UpperCAmelCase ,UpperCAmelCase )
logger.info(f'''Loading Optimizer state from {input_optimizer_file}''' )
SCREAMING_SNAKE_CASE_ = torch.load(UpperCAmelCase )
logger.info(f'''Optimizer state loaded from {input_optimizer_file}''' )
else:
SCREAMING_SNAKE_CASE_ = (
os.path.join(UpperCAmelCase ,f'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if f'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(f'''Loading Optimizer from {ckpt_dir}''' )
SCREAMING_SNAKE_CASE_ = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() ,optimizer_key='''optimizer''' ,storage_reader=dist_cp.FileSystemReader(UpperCAmelCase ) ,)
SCREAMING_SNAKE_CASE_ = optim_state['''optimizer''']
logger.info(f'''Optimizer loaded from {ckpt_dir}''' )
SCREAMING_SNAKE_CASE_ = FSDP.optim_state_dict_to_load(UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase )
optimizer.load_state_dict(UpperCAmelCase )
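# All four helpers above share one pattern: enter an FSDP.state_dict_type(...)
# context configured from the plugin, then branch on FULL / LOCAL / SHARDED
# state-dict types to save or load. A minimal sketch of that pattern, assuming
# a `model` wrapped in FSDP and an Accelerate `fsdp_plugin` (illustration only,
# not the exact call sites used by Accelerate):
#
#     with FSDP.state_dict_type(
#         model,
#         fsdp_plugin.state_dict_type,
#         fsdp_plugin.state_dict_config,
#         fsdp_plugin.optim_state_dict_config,
#     ):
#         state_dict = model.state_dict()  # gathered or sharded per the config
#
# With FULL_STATE_DICT, rank 0 typically materializes and saves the whole dict;
# with SHARDED_STATE_DICT, every rank writes its own shard through
# torch.distributed.checkpoint, as the branches above do.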
| 393 | 0 |
"""simple docstring"""
def solution(pence: int = 200) -> int:
    """Count the ways to make `pence` from standard British coins (Project Euler 31)."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 7_3682
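    # A quick hand-checked example of the dynamic programme above: with coins
    # [1, 2, 5, ...] there are exactly 4 ways to make 5 pence ({5}, {2,2,1},
    # {2,1,1,1}, {1,1,1,1,1}), and a single way (the empty selection) to make 0.
    assert solution(5) == 4
    assert solution(0) == 1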
| 717 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
a =logging.get_logger(__name__)
class __UpperCAmelCase ( VideoMAEImageProcessor ):
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead." , _lowerCamelCase , )
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
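# Migration sketch for the deprecation above: the replacement class is a
# drop-in rename (checkpoint name shown for illustration only).
#
#     from transformers import VideoMAEImageProcessor
#
#     # before: VideoMAEFeatureExtractor.from_pretrained("MCG-NJU/videomae-base")
#     processor = VideoMAEImageProcessor.from_pretrained("MCG-NJU/videomae-base")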
| 132 | 0 |
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
_lowercase : Optional[int] = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def lowerCamelCase__ ( A : str , A : str , A : Union[str, Any] , A : List[str] , A : Optional[int]=False , A : Optional[Any]=True ):
'''simple docstring'''
if model_type not in MODEL_CLASSES:
raise ValueError(f"""Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.""" )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
UpperCAmelCase = cached_file(A , A , force_download=not use_cached_models )
UpperCAmelCase = config_class.from_json_file(A )
UpperCAmelCase = True
UpperCAmelCase = True
print(f"""Building TensorFlow model from configuration: {config}""" )
UpperCAmelCase = model_class(A )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
UpperCAmelCase = cached_file(
A , A , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
UpperCAmelCase = load_pytorch_checkpoint_in_tfa_model(A , A )
if compare_with_pt_model:
UpperCAmelCase = tf_model(tf_model.dummy_inputs , training=A ) # build the network
UpperCAmelCase = torch.load(A , map_location='''cpu''' )
UpperCAmelCase = pt_model_class.from_pretrained(
pretrained_model_name_or_path=A , config=A , state_dict=A )
with torch.no_grad():
UpperCAmelCase = pt_model(**pt_model.dummy_inputs )
UpperCAmelCase = pto[0].numpy()
UpperCAmelCase = tfo[0].numpy()
UpperCAmelCase = np.amax(np.abs(np_pt - np_tf ) )
print(f"""Max absolute difference between models outputs {diff}""" )
assert diff <= 2E-2, f"""Error, model absolute difference is >2e-2: {diff}"""
# Save pytorch-model
print(f"""Save TensorFlow model to {tf_dump_path}""" )
tf_model.save_weights(A , save_format='''h5''' )
def lowerCamelCase__ ( A : Any , A : Dict , A : List[Any]=None , A : int=None , A : List[Any]=False , A : Optional[Any]=False , A : str=False , A : Optional[int]=False , ):
'''simple docstring'''
if args_model_type is None:
UpperCAmelCase = list(MODEL_CLASSES.keys() )
else:
UpperCAmelCase = [args_model_type]
for j, model_type in enumerate(A , start=1 ):
print('''=''' * 1_00 )
print(f""" Converting model type {j}/{len(A )}: {model_type}""" )
print('''=''' * 1_00 )
if model_type not in MODEL_CLASSES:
raise ValueError(f"""Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.""" )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
UpperCAmelCase = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
UpperCAmelCase = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(A , A ) , start=1 ):
print('''-''' * 1_00 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(f""" Skipping finetuned checkpoint {model_shortcut_name}""" )
continue
UpperCAmelCase = model_shortcut_name
elif only_convert_finetuned_models:
print(f""" Skipping not finetuned checkpoint {model_shortcut_name}""" )
continue
print(
f""" Converting checkpoint {i}/{len(A )}: {model_shortcut_name} - model_type {model_type}""" )
print('''-''' * 1_00 )
if config_shortcut_name in aws_config_map:
UpperCAmelCase = cached_file(A , A , force_download=not use_cached_models )
else:
UpperCAmelCase = config_shortcut_name
if model_shortcut_name in aws_model_maps:
UpperCAmelCase = cached_file(A , A , force_download=not use_cached_models )
else:
UpperCAmelCase = model_shortcut_name
if os.path.isfile(A ):
UpperCAmelCase = '''converted_model'''
convert_pt_checkpoint_to_tf(
model_type=A , pytorch_checkpoint_path=A , config_file=A , tf_dump_path=os.path.join(A , model_shortcut_name + '''-tf_model.h5''' ) , compare_with_pt_model=A , )
if remove_cached_files:
os.remove(A )
os.remove(A )
if __name__ == "__main__":
_lowercase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file."""
)
parser.add_argument(
"""--model_type""",
default=None,
type=str,
help=(
F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
"""convert all the models from AWS."""
),
)
parser.add_argument(
"""--pytorch_checkpoint_path""",
default=None,
type=str,
help=(
"""Path to the PyTorch checkpoint path or shortcut name to download from AWS. """
"""If not given, will download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
help=(
"""The config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture. If not given and """
"""--pytorch_checkpoint_path is not given or is a shortcut name """
"""use the configuration associated to the shortcut name on the AWS"""
),
)
parser.add_argument(
"""--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions."""
)
parser.add_argument(
"""--use_cached_models""",
action="""store_true""",
help="""Use cached models if possible instead of updating to latest checkpoint versions.""",
)
parser.add_argument(
"""--remove_cached_files""",
action="""store_true""",
help="""Remove pytorch models after conversion (save memory when converting in batches).""",
)
parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""")
_lowercase : int = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
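# Example invocation, assuming this script is saved as
# convert_pytorch_checkpoint_to_tf2.py (paths are illustrative):
#
#     python convert_pytorch_checkpoint_to_tf2.py \
#         --model_type bert \
#         --pytorch_checkpoint_path bert-base-uncased \
#         --tf_dump_path ./tf_dumps \
#         --compare_with_pt_model
#
# Omitting --model_type converts every entry in MODEL_CLASSES;
# --compare_with_pt_model additionally checks that TF and PyTorch outputs
# agree to within the 2e-2 tolerance asserted above.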
| 210 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_a : List[Any] = logging.getLogger(__name__)
@dataclass
class lowercase_ :
'''simple docstring'''
__lowerCAmelCase : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__lowerCAmelCase : Optional[str] = field(
default=a , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__lowerCAmelCase : Optional[str] = field(
default="NER" , metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} )
__lowerCAmelCase : Optional[str] = field(
default=a , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__lowerCAmelCase : bool = field(default=a , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__lowerCAmelCase : Optional[str] = field(
default=a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class lowercase_ :
'''simple docstring'''
__lowerCAmelCase : str = field(
metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} )
__lowerCAmelCase : Optional[str] = field(
default=a , metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."} , )
__lowerCAmelCase : int = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__lowerCAmelCase : bool = field(
default=a , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def lowerCamelCase__ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
UpperCAmelCase = import_module('tasks' )
try:
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE , model_args.task_type )
UpperCAmelCase = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
UpperCAmelCase = token_classification_task.get_labels(data_args.labels )
UpperCAmelCase = dict(enumerate(SCREAMING_SNAKE_CASE ) )
UpperCAmelCase = len(SCREAMING_SNAKE_CASE )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=SCREAMING_SNAKE_CASE , idalabel=SCREAMING_SNAKE_CASE , labelaid={label: i for i, label in enumerate(SCREAMING_SNAKE_CASE )} , cache_dir=model_args.cache_dir , )
UpperCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
UpperCAmelCase = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , )
# Get datasets
UpperCAmelCase = (
TokenClassificationDataset(
token_classification_task=SCREAMING_SNAKE_CASE , data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
UpperCAmelCase = (
TokenClassificationDataset(
token_classification_task=SCREAMING_SNAKE_CASE , data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : np.ndarray ) -> Tuple[List[int], List[int]]:
UpperCAmelCase = np.argmax(SCREAMING_SNAKE_CASE , axis=2 )
UpperCAmelCase , UpperCAmelCase = preds.shape
UpperCAmelCase = [[] for _ in range(SCREAMING_SNAKE_CASE )]
UpperCAmelCase = [[] for _ in range(SCREAMING_SNAKE_CASE )]
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(SCREAMING_SNAKE_CASE : EvalPrediction ) -> Dict:
UpperCAmelCase , UpperCAmelCase = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),
"precision": precision_score(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),
"recall": recall_score(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),
"f1": fa_score(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),
}
# Data collator
UpperCAmelCase = DataCollatorWithPadding(SCREAMING_SNAKE_CASE , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
UpperCAmelCase = Trainer(
model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , train_dataset=SCREAMING_SNAKE_CASE , eval_dataset=SCREAMING_SNAKE_CASE , compute_metrics=SCREAMING_SNAKE_CASE , data_collator=SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCAmelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
UpperCAmelCase = trainer.evaluate()
UpperCAmelCase = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_process_zero():
with open(SCREAMING_SNAKE_CASE , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
writer.write('%s = %s\n' % (key, value) )
results.update(SCREAMING_SNAKE_CASE )
# Predict
if training_args.do_predict:
UpperCAmelCase = TokenClassificationDataset(
token_classification_task=SCREAMING_SNAKE_CASE , data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = trainer.predict(SCREAMING_SNAKE_CASE )
UpperCAmelCase , UpperCAmelCase = align_predictions(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCAmelCase = os.path.join(training_args.output_dir , 'test_results.txt' )
if trainer.is_world_process_zero():
with open(SCREAMING_SNAKE_CASE , 'w' ) as writer:
for key, value in metrics.items():
logger.info(' %s = %s' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
writer.write('%s = %s\n' % (key, value) )
# Save predictions
UpperCAmelCase = os.path.join(training_args.output_dir , 'test_predictions.txt' )
if trainer.is_world_process_zero():
with open(SCREAMING_SNAKE_CASE , 'w' ) as writer:
with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
token_classification_task.write_predictions_to_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return results
def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : Dict ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
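# Example invocation (illustrative paths; assumes CoNLL-2003-formatted
# train/dev/test .txt files under --data_dir and a tasks.py module defining
# the requested TokenClassificationTask subclass):
#
#     python run_ner.py \
#         --data_dir ./data/conll2003 \
#         --model_name_or_path bert-base-cased \
#         --output_dir ./ner-out \
#         --max_seq_length 128 \
#         --do_train --do_eval --do_predict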
| 447 | 0 |
def neville_interpolate(x_points: list, y_points: list, x0: float) -> list:
    """Interpolate the value at `x0` from the given points via Neville's iterated scheme."""
    n = len(x_points)
    q = [[0] * n for _ in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]  # column 1 holds the known function values
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]  # interpolated value plus the full tableau
if __name__ == "__main__":
import doctest
doctest.testmod()
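    # Worked example (hand-checked): these points lie on the line y = x + 5,
    # so interpolating at x0 = 5 must give exactly 10.0; the second return
    # value is the full Neville tableau.
    value, tableau = neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)
    assert value == 10.0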
| 354 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class _lowerCamelCase( unittest.TestCase ):
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : List[Any] = tf.convert_to_tensor(
[
[
8.2_2_2_0_9_9_1, # 3rd highest value; idx. 0
-0.5_6_2_0_0_4_4,
5.2_3_2_2_9_7_5_2,
4.0_3_8_6_3_9_3,
-6.8_7_9_8_3_7_8,
-0.5_4_7_8_5_8_0_2,
-3.2_0_1_2_1_5_3,
2.9_2_7_7_7_1_7_6,
1.8_8_1_7_1_9_5_3,
7.3_5_3_4_1_2_7_6, # 5th highest value; idx. 9
8.4_3_2_0_7_8_3_3, # 2nd highest value; idx. 10
-9.8_5_7_1_1_8_3_6,
-5.9_6_2_0_9_2_3_6,
-1.1_3_0_3_9_1_6_1,
-7.1_1_1_5_2_9_4,
-0.8_3_6_9_6_3_3,
-5.3_1_8_6_4_0_8,
7.0_6_4_2_7_4_0_7,
0.8_1_3_6_9_3_4_4,
-0.8_2_0_2_3_8_1_7,
-5.9_1_7_9_7_9_6,
0.5_8_8_1_3_4_4_3,
-6.9_9_7_7_8_4_3_8,
4.7_1_5_5_1_1_8_9,
-0.1_8_7_7_1_6_3_7,
7.4_4_0_2_0_7_5_9, # 4th highest value; idx. 25
9.3_8_4_5_0_9_8_7, # 1st highest value; idx. 26
2.1_2_6_6_2_9_4_1,
-9.3_2_5_6_2_0_3_8,
2.3_5_6_5_2_5_2_2,
], # cumulative prob of 5 highest values <= 0.6
[
0.5_8_4_2_5_5_1_8,
4.5_3_1_3_9_2_3_8,
-5.5_7_5_1_0_4_6_4,
-6.2_8_0_3_0_6_9_9,
-7.1_9_5_2_9_5_0_3,
-4.0_2_1_2_2_5_5_1,
1.3_9_3_3_7_0_3_7,
-6.0_6_7_0_7_0_5_7,
1.5_9_4_8_0_5_1_7,
-9.6_4_3_1_1_9,
0.0_3_9_0_7_7_9_9,
0.6_7_2_3_1_7_6_2,
-8.8_8_2_0_6_7_2_6,
6.2_7_1_1_5_9_2_2, # 4th highest value; idx. 13
2.2_8_5_2_0_7_2_3,
4.8_2_7_6_7_5_0_6,
4.3_0_4_2_1_3_6_8,
8.8_2_7_5_3_1_3, # 2nd highest value; idx. 17
5.4_4_0_2_9_9_5_8, # 5th highest value; idx. 18
-4.4_7_3_5_7_9_4,
7.3_8_5_7_9_5_3_6, # 3rd highest value; idx. 20
-2.9_1_0_5_1_6_6_3,
2.6_1_9_4_6_0_7_7,
-2.5_6_7_4_7_6_2,
-9.4_8_9_5_9_3_0_2,
-4.0_2_9_2_2_6_4_5,
-1.3_5_4_1_6_9_1_8,
9.6_7_7_0_2_3_2_3, # 1st highest value; idx. 27
-5.8_9_4_7_8_5_5_3,
1.8_5_3_7_0_4_6_7,
], # cumulative prob of 5 highest values <= 0.6
], dtype=tf.floataa, )
_lowercase : Dict = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]], dtype=tf.intaa, ) # expected non filtered idx as noted above
_lowercase : Optional[Any] = tf.convert_to_tensor(
[8.2_2_2_0_9_9, 7.3_5_3_4_1_2_6, 8.4_3_2_0_7_8, 7.4_4_0_2_0_7_5, 9.3_8_4_5_1, 6.2_7_1_1_5_9, 8.8_2_7_5_3_1, 5.4_4_0_2_9_9_5, 7.3_8_5_7_9_5_6, 9.6_7_7_0_2_3], dtype=tf.floataa, ) # expected non filtered values as noted above
_lowercase : Optional[int] = tf_top_k_top_p_filtering(lowerCamelCase, top_k=10, top_p=0.6, min_tokens_to_keep=4)
_lowercase : int = output[output != -float('inf')]
_lowercase : List[str] = tf.cast(
tf.where(tf.not_equal(lowerCamelCase, tf.constant(-float('inf'), dtype=tf.floataa))), dtype=tf.intaa, )
tf.debugging.assert_near(lowerCamelCase, lowerCamelCase, rtol=1E-12)
tf.debugging.assert_equal(lowerCamelCase, lowerCamelCase)
@require_tf
class _lowerCamelCase( unittest.TestCase, _a ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
lowercase_ : Optional[Any] = {
"""AutoModelForCausalLM""": TFAutoModelForCausalLM,
"""AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq,
"""AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM,
"""AutoModelForVision2Seq""": TFAutoModelForVisionaSeq,
"""LogitsProcessorList""": TFLogitsProcessorList,
"""MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor,
"""create_tensor_fn""": tf.convert_to_tensor,
"""floats_tensor""": floats_tensor,
"""return_tensors""": """tf""",
}
@slow
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2')
_lowercase : Any = 2
_lowercase : Tuple = 2
class _lowerCamelCase( tf.Module ):
def __init__( self, lowerCamelCase) -> int:
"""simple docstring"""
super(lowerCamelCase, self).__init__()
_lowercase : int = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length), tf.intaa, name='input_ids'),
tf.TensorSpec((None, input_length), tf.intaa, name='attention_mask'),
), jit_compile=lowerCamelCase, )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> List[str]:
"""simple docstring"""
_lowercase : List[str] = self.model.generate(
input_ids=lowerCamelCase, attention_mask=lowerCamelCase, max_new_tokens=lowerCamelCase, return_dict_in_generate=lowerCamelCase, )
return {"sequences": outputs["sequences"]}
_lowercase : List[Any] = [[2, 0], [1_02, 1_03]]
_lowercase : Tuple = [[1, 0], [1, 1]]
_lowercase : Dict = DummyModel(model=lowerCamelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(lowerCamelCase, lowerCamelCase, signatures={'serving_default': dummy_model.serving})
_lowercase : Dict = tf.saved_model.load(lowerCamelCase).signatures['serving_default']
for batch_size in range(1, len(lowerCamelCase) + 1):
_lowercase : int = {
'input_ids': tf.constant(dummy_input_ids[:batch_size]),
'attention_mask': tf.constant(dummy_attention_masks[:batch_size]),
}
_lowercase : Optional[int] = serving_func(**lowerCamelCase)['sequences']
_lowercase : Optional[Any] = test_model.generate(**lowerCamelCase, max_new_tokens=lowerCamelCase)
tf.debugging.assert_equal(lowerCamelCase, lowerCamelCase)
@slow
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2')
_lowercase : List[str] = 1
_lowercase : Tuple = 2
class _lowerCamelCase( tf.Module ):
def __init__( self, lowerCamelCase) -> Any:
"""simple docstring"""
super(lowerCamelCase, self).__init__()
_lowercase : Optional[int] = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None), tf.intaa, name='input_ids'),
tf.TensorSpec((batch_size, None), tf.intaa, name='attention_mask'),
), jit_compile=lowerCamelCase, )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : Optional[int] = self.model.generate(
input_ids=lowerCamelCase, attention_mask=lowerCamelCase, max_new_tokens=lowerCamelCase, return_dict_in_generate=lowerCamelCase, )
return {"sequences": outputs["sequences"]}
_lowercase : Optional[int] = [[2], [1_02, 1_03]]
_lowercase : List[Any] = [[1], [1, 1]]
_lowercase : Tuple = DummyModel(model=lowerCamelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(lowerCamelCase, lowerCamelCase, signatures={'serving_default': dummy_model.serving})
_lowercase : Optional[int] = tf.saved_model.load(lowerCamelCase).signatures['serving_default']
for input_row in range(len(lowerCamelCase)):
_lowercase : Dict = {
'input_ids': tf.constant([dummy_input_ids[input_row]]),
'attention_mask': tf.constant([dummy_attention_masks[input_row]]),
}
_lowercase : List[Any] = serving_func(**lowerCamelCase)['sequences']
_lowercase : List[str] = test_model.generate(**lowerCamelCase, max_new_tokens=lowerCamelCase)
tf.debugging.assert_equal(lowerCamelCase, lowerCamelCase)
@slow
@require_tensorflow_text
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='google/flan-t5-small', filename='spiece.model', local_dir=lowerCamelCase)
class _lowerCamelCase( tf.keras.layers.Layer ):
def __init__( self) -> Dict:
"""simple docstring"""
super().__init__()
_lowercase : Union[str, Any] = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(lowerCamelCase, 'spiece.model'), 'rb').read())
_lowercase : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained('hf-internal-testing/tiny-random-t5')
def UpperCamelCase ( self, lowerCamelCase, *lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
_lowercase : Tuple = self.tokenizer.tokenize(lowerCamelCase)
_lowercase , _lowercase : Tuple = text.pad_model_inputs(
lowerCamelCase, max_seq_length=64, pad_value=self.model.config.pad_token_id)
_lowercase : Tuple = self.model.generate(input_ids=lowerCamelCase, attention_mask=lowerCamelCase)
return self.tokenizer.detokenize(lowerCamelCase)
_lowercase : Tuple = CompleteSentenceTransformer()
_lowercase : Optional[Any] = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name='inputs')
_lowercase : Tuple = complete_model(lowerCamelCase)
_lowercase : Union[str, Any] = tf.keras.Model(lowerCamelCase, lowerCamelCase)
keras_model.save(lowerCamelCase)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : int = {
'do_sample': True,
'num_beams': 1,
'top_p': 0.7,
'top_k': 10,
'temperature': 0.7,
}
_lowercase : Tuple = 14
_lowercase : Tuple = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
_lowercase : Any = 'Hello, my dog is cute and'
_lowercase : List[Any] = tokenizer(lowerCamelCase, return_tensors='tf')
_lowercase : Optional[int] = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2')
_lowercase : Union[str, Any] = 6_38
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(':/CPU:0'):
tf.random.set_seed(0)
_lowercase : Tuple = model.generate(**lowerCamelCase, eos_token_id=lowerCamelCase, **lowerCamelCase)
self.assertTrue(expectation == len(generated_tokens[0]))
_lowercase : Tuple = [6_38, 1_98]
with tf.device(':/CPU:0'):
tf.random.set_seed(0)
_lowercase : Optional[Any] = model.generate(**lowerCamelCase, eos_token_id=lowerCamelCase, **lowerCamelCase)
self.assertTrue(expectation == len(generated_tokens[0]))
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Optional[Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart')
_lowercase : Any = 'Hugging Face is a technology company based in New York and Paris.'
_lowercase : Optional[int] = bart_tokenizer(lowerCamelCase, return_tensors='tf').input_ids
_lowercase : Union[str, Any] = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart')
_lowercase : Union[str, Any] = bart_model.generate(lowerCamelCase).numpy()
class _lowerCamelCase( _a ):
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> int:
"""simple docstring"""
return super().call(lowerCamelCase, **lowerCamelCase)
_lowercase : Any = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart')
_lowercase : Optional[int] = bart_model.generate(lowerCamelCase, foo='bar').numpy()
self.assertTrue(np.array_equal(lowerCamelCase, lowerCamelCase))
class _lowerCamelCase( bart_model.model.encoder.__class__ ):
def UpperCamelCase ( self, lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
return super().call(lowerCamelCase, **lowerCamelCase)
_lowercase : List[Any] = FakeEncoder(bart_model.config, bart_model.model.shared)
_lowercase : int = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
_lowercase : Dict = bart_model.generate(lowerCamelCase).numpy()
with self.assertRaises(lowerCamelCase):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(lowerCamelCase, foo='bar')
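# The first test in this file exercises combined top-k / top-p (nucleus)
# filtering. Below is a minimal NumPy sketch of the standard nucleus rule
# (keep the k largest logits, then the smallest prefix of probability-sorted
# tokens whose mass reaches top_p, never fewer than min_tokens_to_keep).
# It illustrates the idea only; the library function's exact threshold and
# tie-breaking conventions may differ slightly.
import numpy as np

def top_k_top_p_filter(logits: np.ndarray, top_k: int = 0, top_p: float = 1.0,
                       min_tokens_to_keep: int = 1) -> np.ndarray:
    """Return a copy of 1-D `logits` with filtered entries set to -inf."""
    out = logits.astype(np.float64).copy()
    if top_k > 0:
        kth_largest = np.sort(out)[-min(top_k, out.size)]
        out[out < kth_largest] = -np.inf  # keep only the top_k largest logits
    if top_p < 1.0:
        order = np.argsort(out)[::-1]              # highest logit first
        probs = np.exp(out[order] - out[order][0])
        probs /= probs.sum()
        exclusive_cum = np.cumsum(probs) - probs   # mass strictly before each token
        keep = exclusive_cum < top_p               # smallest prefix reaching top_p
        keep[:min_tokens_to_keep] = True
        out[order[~keep]] = -np.inf
    return out

# Hand-checked example: 0.30 + 0.25 = 0.55 < 0.6, so the 0.20 token that
# crosses the threshold is kept as well and exactly 3 logits survive.
logits = np.log(np.array([0.30, 0.25, 0.20, 0.15, 0.10]))
assert np.isfinite(top_k_top_p_filter(logits, top_p=0.6)).sum() == 3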
| 354 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = StableDiffusionPanoramaPipeline
lowerCAmelCase__ = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowercase_ ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
UpperCAmelCase__ : Optional[Any] = DDIMScheduler()
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase__ : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
UpperCAmelCase__ : Any = CLIPTextModel(_A )
UpperCAmelCase__ : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase__ : Any = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowercase_ ( self : Any , _A : Optional[Any] , _A : Dict=0 ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = torch.manual_seed(_A )
UpperCAmelCase__ : Optional[Any] = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
# Setting height and width to None to prevent OOMs on CPU.
'''height''': None,
'''width''': None,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : Tuple = self.get_dummy_components()
UpperCAmelCase__ : str = StableDiffusionPanoramaPipeline(**_A )
UpperCAmelCase__ : int = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase__ : List[Any] = self.get_dummy_inputs(_A )
UpperCAmelCase__ : int = sd_pipe(**_A ).images
UpperCAmelCase__ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ : Dict = np.array([0.6_1_8_6, 0.5_3_7_4, 0.4_9_1_5, 0.4_1_3_5, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_7, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self : Tuple ):
'''simple docstring'''
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase_ ( self : int ):
'''simple docstring'''
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3 )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : Optional[int] = self.get_dummy_components()
UpperCAmelCase__ : Dict = StableDiffusionPanoramaPipeline(**_A )
UpperCAmelCase__ : Optional[Any] = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase__ : List[str] = self.get_dummy_inputs(_A )
UpperCAmelCase__ : List[Any] = '''french fries'''
UpperCAmelCase__ : Tuple = sd_pipe(**_A , negative_prompt=_A )
UpperCAmelCase__ : Tuple = output.images
UpperCAmelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ : List[Any] = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : Union[str, Any] = self.get_dummy_components()
UpperCAmelCase__ : str = StableDiffusionPanoramaPipeline(**_A )
UpperCAmelCase__ : Optional[int] = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase__ : str = self.get_dummy_inputs(_A )
UpperCAmelCase__ : int = sd_pipe(**_A , view_batch_size=2 )
UpperCAmelCase__ : Optional[int] = output.images
UpperCAmelCase__ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ : int = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : Optional[Any] = self.get_dummy_components()
UpperCAmelCase__ : List[Any] = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' )
UpperCAmelCase__ : List[str] = StableDiffusionPanoramaPipeline(**_A )
UpperCAmelCase__ : Union[str, Any] = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase__ : List[Any] = self.get_dummy_inputs(_A )
UpperCAmelCase__ : Optional[int] = sd_pipe(**_A ).images
UpperCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ : List[Any] = np.array([0.4_0_2_4, 0.6_5_1_0, 0.4_9_0_1, 0.5_3_7_8, 0.5_8_1_3, 0.5_6_2_2, 0.4_7_9_5, 0.4_4_6_7, 0.4_9_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : str = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : Tuple = self.get_dummy_components()
UpperCAmelCase__ : List[str] = PNDMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , skip_prk_steps=_A )
UpperCAmelCase__ : Optional[int] = StableDiffusionPanoramaPipeline(**_A )
UpperCAmelCase__ : str = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase__ : Tuple = self.get_dummy_inputs(_A )
UpperCAmelCase__ : Optional[int] = sd_pipe(**_A ).images
UpperCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ : int = np.array([0.6_3_9_1, 0.6_2_9_1, 0.4_8_6_1, 0.5_1_3_4, 0.5_5_5_2, 0.4_5_7_8, 0.5_0_3_2, 0.5_0_2_3, 0.4_5_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2


@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-2

    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        # The new scheduler must be assigned back to the pipeline to take effect.
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
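

# Illustrative usage sketch (not part of the test suite): how the panorama
# pipeline exercised above is typically driven end to end. It assumes a CUDA
# device and network access to download the checkpoint; the prompt and step
# count are arbitrary choices, not values from the tests.
if __name__ == "__main__":
    model_ckpt = "stabilityai/stable-diffusion-2-base"
    scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
    pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler)
    pipe = pipe.to("cuda")
    # The panorama pipeline denoises overlapping views and blends them into one wide image.
    panorama = pipe("a photo of the dolomites", num_inference_steps=50).images[0]
    panorama.save("panorama.png")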


import json
import os
import unittest

from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very hard to mix pretokenized inputs with byte-level BPE, so this is skipped.
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests: padding to max_length must fail without a pad token
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass

    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0,
                    sequence_1,
                    add_special_tokens=True,
                    return_special_tokens_mask=True,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)


@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"
        ids = tokenizer.encode(text)
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        ids = tokenizer.encode(text)
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"
        ids = tokenizer.encode(text)
        # Same as above
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]
        text = "A photo of a cat"
        ids = tokenizer.encode(text)
        # We changed the bos token
        self.assertEqual(ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        ids = tokenizer.encode(text)
        self.assertEqual(ids, [31957, 250, 1345, 9, 10, 4758])
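

# Illustrative sketch (not part of the original test file): the prefix-space
# behaviour checked above, shown on the public "gpt2" checkpoint rather than
# the tiny fixture. Requires network access, which the unit tests do not.
if __name__ == "__main__":
    demo_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
    # Without a prefix space, the first word is tokenized as sentence-initial.
    print(demo_tokenizer.tokenize("lower newer"))
    # With add_prefix_space=True, "lower" is tokenized as a mid-sentence word ("\u0120lower").
    print(demo_tokenizer.tokenize("lower newer", add_prefix_space=True))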


import unittest

from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
snake_case : List[Any] = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = snake_case  # the reference encoding defined above

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="albert-base-v2",
            revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
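

# Illustrative sketch (not part of the original test file): the same local
# SentencePiece fixture can be loaded directly to inspect tokenization. This
# runs offline; the printed tokens mirror what test_full_tokenizer asserts.
if __name__ == "__main__":
    demo_tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)
    print(demo_tokenizer.tokenize("This is a test"))  # ['▁this', '▁is', '▁a', '▁test']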
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification


def embeddings(idx):
    """
    The function helps in renaming embedding layer weights.

    Args:
        idx: stage number in the original model
    """
    embed = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
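
# Illustration: each helper in this script returns (huggingface_key, original_key)
# pairs that drive the renaming loop in convert_cvt_checkpoint below. For example,
# embeddings(0)[0] evaluates to
#   ("cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight",
#    "stage0.patch_embed.proj.weight")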


def attention(idx, cnt):
    """
    The function helps in renaming attention block layer weights.

    Args:
        idx: stage number in the original model
        cnt: block index within the stage
    """
    attention_weights = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights


def cls_token(idx):
    """
    The function helps in renaming cls_token weights.
    """
    token = []
token.append((f"""cvt.encoder.stages.{idx}.cls_token""", """stage2.cls_token""") )
return token


def final():
    """
    The function helps in renaming the final classification layer.
    """
    head = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head


def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """
    Convert an original CvT checkpoint into a HuggingFace checkpoint.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"

    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2+2+20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)


# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input image size.",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Path to the original CvT checkpoint file.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
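
# Hedged usage sketch: a typical invocation, assuming the checkpoint was
# downloaded from the zoo link above into ./cvtmodels. The script filename
# below is an assumption; adjust it to wherever this file lives.
#
#   python convert_cvt_checkpoint.py \
#       --cvt_model cvt-w24 \
#       --image_size 384 \
#       --cvt_file_name ./cvtmodels/CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24-384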