"""
Project Euler Problem 145: https://projecteuler.net/problem=145

Some positive integers n have the property that the sum [n + reverse(n)]
consists entirely of odd (decimal) digits. We call such numbers reversible.
There are 120 reversible numbers below one-thousand; count them below 10^9.
"""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(
    remaining_length: int, remainder: int, digits: list[int], length: int
) -> int:
    """
    Count the reversible numbers of the given length by filling in digit
    pairs from the outside in, tracking the carry in `remainder`.
    """
    if remaining_length == 0:
        # Leading/trailing zeros are not allowed.
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        # Verify that every column of n + reverse(n) yields an odd digit.
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        # Odd length: the middle digit is added to itself, so the carry into
        # that column must be odd for the resulting digit to be odd.
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        # The two digits of a pair must have opposite parity for their sum
        # (plus carry) to be odd.
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """
    Count reversible numbers with at most max_power digits.

    >>> solution(3)
    120
    """
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
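    # Brute-force cross-check for small limits; `is_reversible` is an
    # illustrative helper added here, not part of the original solution.
    def is_reversible(n: int) -> bool:
        if n % 10 == 0:  # reverse(n) would have a leading zero
            return False
        return all(int(d) % 2 == 1 for d in str(n + int(str(n)[::-1])))

    assert sum(is_reversible(n) for n in range(1, 1_000)) == solution(3) == 120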
"""
PyTorch Lightning base module shared by the transformers research example
scripts (this variant carries the RAG end-to-end retriever callbacks).
"""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict

import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info

from transformers import (
    AdamW,
    AutoConfig,
    AutoModel,
    AutoModelForPreTraining,
    AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
    AutoModelForSequenceClassification,
    AutoModelForTokenClassification,
    AutoModelWithLMHead,
    AutoTokenizer,
    PretrainedConfig,
    PreTrainedTokenizer,
)
from transformers.optimization import (
    Adafactor,
    get_cosine_schedule_with_warmup,
    get_cosine_with_hard_restarts_schedule_with_warmup,
    get_linear_schedule_with_warmup,
    get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version


logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,             # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"


class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model

    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(__file__).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--dropout", type=float, help="Dropout probability (Optional). Goes into model.config")
        parser.add_argument(
            "--attention_dropout", type=float, help="Attention dropout probability (Optional). Goes into model.config"
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")


class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # check whether all model parameters received a gradient after backward
    def on_after_backward(self, trainer, pl_module):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))


def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        "--output_dir",
        default=str(Path(__file__).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(__file__).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )


def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
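# Minimal sketch of how a task module plugs into BaseTransformer. Everything
# below (class name, dummy tensors, field layout) is illustrative, not part of
# the original script; a real task would supply tokenized datasets.
import torch
from torch.utils.data import DataLoader, TensorDataset


class DummyClassifier(BaseTransformer):
    def __init__(self, hparams):
        super().__init__(hparams, num_labels=2, mode="sequence-classification")

    def training_step(self, batch, batch_idx):
        input_ids, attention_mask, labels = batch
        # AutoModelForSequenceClassification returns the loss when labels are given
        return self.model(input_ids, attention_mask=attention_mask, labels=labels).loss

    def validation_step(self, batch, batch_idx):
        return self.training_step(batch, batch_idx)

    def get_dataloader(self, type_path, batch_size, shuffle=False):
        n = 8  # eight random stand-in examples
        dataset = TensorDataset(
            torch.randint(0, 100, (n, 16)),
            torch.ones(n, 16, dtype=torch.long),
            torch.randint(0, 2, (n,)),
        )
        return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)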
def equated_monthly_installments(
    principal: float, rate_per_annum: float, years_to_repay: int
) -> float:
    """
    Formula for amortization amount per month:
    A = p * r * (1 + r)^n / ((1 + r)^n - 1)
    where p is the principal, r is the rate of interest per month
    and n is the number of payments
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
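    # Quick usage check: borrowing 25,000 at 12% annual interest over 3 years
    # (r = 0.01/month, n = 36 payments) gives a monthly installment of ~830.36.
    assert round(equated_monthly_installments(25_000, 0.12, 3), 2) == 830.36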
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class lowerCamelCase (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ = BlipImageProcessor()
SCREAMING_SNAKE_CASE__ = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
SCREAMING_SNAKE_CASE__ = BlipaProcessor(__UpperCAmelCase , __UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : List[str] , **__UpperCAmelCase : Optional[Any] ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).tokenizer
def SCREAMING_SNAKE_CASE ( self : Dict , **__UpperCAmelCase : Optional[Any] ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).image_processor
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
SCREAMING_SNAKE_CASE__ = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE__ = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
SCREAMING_SNAKE_CASE__ = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
SCREAMING_SNAKE_CASE__ = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = image_processor(__UpperCAmelCase , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ = processor(images=__UpperCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = """lower newer"""
SCREAMING_SNAKE_CASE__ = processor(text=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = """lower newer"""
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__ = processor.batch_decode(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = """lower newer"""
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
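# The class above is plain unittest, so it can be run directly; this entry
# point is an addition for convenience (the repository normally runs it via
# `python -m pytest tests/models/blip_2/test_processor_blip_2.py`).
if __name__ == "__main__":
    unittest.main()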
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
A_ : str = logging.get_logger(__name__)
# TODO: upload to AWS
A_ : Optional[int] = {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
),
}
class lowerCamelCase (A__ ):
lowerCamelCase__ : Any = 'retribert'
def __init__( self : Tuple , __UpperCAmelCase : Optional[Any]=3_0_5_2_2 , __UpperCAmelCase : Union[str, Any]=7_6_8 , __UpperCAmelCase : List[str]=8 , __UpperCAmelCase : Dict=1_2 , __UpperCAmelCase : List[Any]=3_0_7_2 , __UpperCAmelCase : str="gelu" , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : List[Any]=5_1_2 , __UpperCAmelCase : Optional[int]=2 , __UpperCAmelCase : List[str]=0.02 , __UpperCAmelCase : Any=1e-12 , __UpperCAmelCase : str=True , __UpperCAmelCase : List[Any]=1_2_8 , __UpperCAmelCase : Tuple=0 , **__UpperCAmelCase : Optional[int] , ) -> List[str]:
super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = share_encoders
SCREAMING_SNAKE_CASE__ = projection_dim
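# Usage sketch (illustrative addition): the defaults above mirror the
# yjernite/retribert-base-uncased checkpoint, so a bare construction
# reproduces that layout without downloading anything.
if __name__ == "__main__":
    config = RetriBertConfig()
    print(config.hidden_size, config.num_hidden_layers, config.projection_dim)  # 768 8 128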
def abbr(a: str, b: str) -> bool:
    """
    Determine whether string b is an "abbreviation" of string a: a matches b
    if we can capitalize some lowercase letters of a and delete the rest.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    # dp[i][j] is True when the first i characters of a can produce the
    # first j characters of b.
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
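    # Worked example: "daBcd" -> capitalize 'a' and 'c', delete both 'd's -> "ABC".
    # "dBcd" fails because no 'a' is available before the mandatory 'B'.
    assert abbr("daBcd", "ABC") is True
    assert abbr("dBcd", "ABC") is False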
"""Convert an RWKV checkpoint from BlinkDL to the Hugging Face format."""
import argparse
import gc
import json
import os
import re

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint


NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}


def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict


def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same
    # space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )

    args = parser.parse_args()
    convert_rmkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
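# Example invocation (repo and file names are illustrative; any RWKV-4
# checkpoint repo with a .pth file follows the same pattern):
#   python convert_rwkv_checkpoint_to_hf.py \
#       --repo_id BlinkDL/rwkv-4-pile-169m \
#       --checkpoint_file RWKV-4-Pile-169M-20220807-8023.pth \
#       --output_dir ./rwkv-169m-hf \
#       --size 169M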
"""
File utilities: utilities related to download and cache models.

This module should not be updated anymore and is only left for backward compatibility.
"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def match_pattern(input_string: str, pattern: str) -> bool:
    """
    uses bottom-up dynamic programming to match the input string against a
    pattern, where '.' matches any single character and '*' matches zero or
    more of the preceding element.

    >>> match_pattern("aab", "c*a*b")
    True
    >>> match_pattern("aaa", "aa")
    False
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")

    input_string = "aab"
    pattern = "c*a*b"

    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
"""Convert DPT (hybrid) checkpoints from the original repository
(https://github.com/intel-isl/DPT) to the Hugging Face format."""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image

from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    # note: originally written `if "nyu" or "midas" in checkpoint_url`, which
    # is always True; the membership test must apply to both substrings
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape


def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name


def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1),
                size=(image.size[1], image.size[0]),
                mode="bicubic",
                align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )

        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )
    parser.add_argument(
        "--show_prediction",
        action="store_true",
    )

    args = parser.parse_args()
    convert_dpt_checkpoint(
        args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
    )
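# Example invocation (the checkpoint URL shown is the script's own default,
# so it can be omitted; output path is illustrative):
#   python convert_dpt_hybrid_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-converted \
#       --show_prediction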
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
lowerCAmelCase__ = '''
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
'''
_KWARGS_DESCRIPTION = '''
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .
- `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{\'recall\': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{\'recall\': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{\'recall\': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'recall\': array([1., 0., 0.])}
'''
_CITATION = '''
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn"
    ):
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
"""simple docstring"""
from __future__ import annotations
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = set(_SCREAMING_SNAKE_CASE ), [start]
while stack:
UpperCamelCase = stack.pop()
explored.add(_SCREAMING_SNAKE_CASE )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(_SCREAMING_SNAKE_CASE )
return explored
lowerCAmelCase__ = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
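    # For comparison, the same traversal in recursive form (an illustrative
    # addition; recursion depth is bounded by the number of vertices):
    def depth_first_search_recursive(graph: dict, v: str, explored: set | None = None) -> set:
        if explored is None:
            explored = set()
        explored.add(v)
        for adj in graph[v]:
            if adj not in explored:
                depth_first_search_recursive(graph, adj, explored)
        return explored

    assert depth_first_search_recursive(G, "A") == depth_first_search(G, "A")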
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : int ) -> int:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError('''Input must be an integer''' )
if input_num <= 0:
raise ValueError('''Input must be positive''' )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
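    # `is_perfect` is an illustrative helper (not part of the original
    # module): a number is perfect when it equals the sum of its proper
    # divisors.
    def is_perfect(n: int) -> bool:
        return sum_of_divisors(n) == n

    print([n for n in range(2, 10_000) if is_perfect(n)])  # [6, 28, 496, 8128]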
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {
"configuration_blip_2": [
"BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Blip2Config",
"Blip2QFormerConfig",
"Blip2VisionConfig",
],
"processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Blip2Model",
"Blip2QFormerModel",
"Blip2PreTrainedModel",
"Blip2ForConditionalGeneration",
"Blip2VisionModel",
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def or_gate(input_1: int, input_2: int) -> int:
    """
    Calculate OR of the input values.

    >>> or_gate(0, 0)
    0
    >>> or_gate(0, 1)
    1
    """
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function"""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
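    # A compact truth-table printer over both inputs (an illustrative
    # addition, not part of the original module):
    from itertools import product

    for a, b in product((0, 1), repeat=2):
        print(f"{a} OR {b} = {or_gate(a, b)}")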
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
snake_case_ : List[str] = logging.get_logger(__name__)
snake_case_ : Tuple = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class __snake_case ( a ):
UpperCAmelCase__ : str = '''codegen'''
UpperCAmelCase__ : int = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
    def __init__(
        self,
        vocab_size=50400,
        n_ctx=2048,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
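

if __name__ == "__main__":
    # Usage sketch (illustrative, not part of the original module): build a
    # small config and inspect the symbolic input axes the ONNX config declares.
    demo_config = CodeGenConfig(n_layer=2, n_head=4, n_embd=128)
    demo_onnx_config = CodeGenOnnxConfig(demo_config, task="default")
    print(demo_onnx_config.inputs)  # OrderedDict for "input_ids" and "attention_mask"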
| 51 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
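
# To run just this suite (illustrative invocation; the path assumes the
# standard transformers repository layout):
#   python -m pytest tests/models/glpn/test_image_processing_glpn.py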
| 351 |
"""simple docstring"""
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166,
            d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )
    def create_and_check_model(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels,
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels,
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fp16_forward(self, config, input_dict):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)
    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )
    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)
        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }
        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )
            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
# fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
# fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 157 | 0 |
"""Project Euler problem 3: https://projecteuler.net/problem=3

Find the largest prime factor of a given number.
"""


def solution(n: int = 600_851_475_143) -> int:
    """Return the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    largest_factor = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            largest_factor = i
            n //= i
        i += 1
    if n > 1:
        largest_factor = n
    return int(largest_factor)
if __name__ == "__main__":
print(F'''{solution() = }''')
| 234 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
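

if __name__ == "__main__":
    # Usage sketch (illustrative, not part of the original module): a default
    # config next to a two-stage variant (two_stage requires with_box_refine).
    default_config = DeformableDetrConfig()
    two_stage_config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
    print(default_config.model_type, default_config.hidden_size, two_stage_config.two_stage)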
| 217 | 0 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
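

# Wiring sketch (illustrative, not part of the original module): the factories
# above plug directly into a pytorch_lightning Trainer, e.g.
#   callbacks = [
#       get_checkpoint_callback(output_dir="outputs", metric="bleu"),
#       get_early_stopping_callback(metric="bleu", patience=3),
#       Seq2SeqLoggingCallback(),
#   ]
#   trainer = pl.Trainer(callbacks=callbacks)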
| 257 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 257 | 1 |
import warnings
warnings.warn(
"memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: "
"`from accelerate import find_executable_batch_size` to avoid this warning.",
FutureWarning,
)
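
# The supported import path after the reorganization (taken from the warning
# message above):
#   from accelerate import find_executable_batch_size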
| 123 |
from __future__ import annotations
_snake_case : Any = "Muhammad Umer Farooq"
_snake_case : Optional[int] = "MIT"
_snake_case : Union[str, Any] = "1.0.0"
_snake_case : Optional[Any] = "Muhammad Umer Farooq"
_snake_case : List[Any] = "contact@muhammadumerfarooq.me"
_snake_case : Dict = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
def get_domain_name(url: str) -> str:
    """Return the second-level and top-level domain of a URL, e.g. 'github.com'."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Return the network location (sub domain) of a URL."""
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Scrape a page for links and collect the e-mail addresses found on them."""
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
print(f'''{len(emails)} emails found:''')
print("\n".join(sorted(emails)))
| 123 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak model's weights to our DeiT structure.
    """
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
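
# Example invocation (illustrative; the script filename is an assumption):
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled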
| 48 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Salesforce/codegen-350M-nl""": """https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json""",
"""Salesforce/codegen-350M-multi""": """https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json""",
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json""",
"""Salesforce/codegen-2B-nl""": """https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json""",
"""Salesforce/codegen-2B-multi""": """https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json""",
"""Salesforce/codegen-2B-mono""": """https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json""",
"""Salesforce/codegen-6B-nl""": """https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json""",
"""Salesforce/codegen-6B-multi""": """https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json""",
"""Salesforce/codegen-6B-mono""": """https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json""",
"""Salesforce/codegen-16B-nl""": """https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json""",
"""Salesforce/codegen-16B-multi""": """https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json""",
"""Salesforce/codegen-16B-mono""": """https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json""",
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=50400,
        n_ctx=2048,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class _UpperCamelCase ( A ):
'''simple docstring'''
def __init__( self : Tuple , _lowerCAmelCase : PretrainedConfig , _lowerCAmelCase : str = "default" , _lowerCAmelCase : List[PatchingSpec] = None , _lowerCAmelCase : bool = False , ):
'''simple docstring'''
super().__init__(_lowerCAmelCase , task=_lowerCAmelCase , patching_specs=_lowerCAmelCase , use_past=_lowerCAmelCase)
if not getattr(self._config , 'pad_token_id' , _lowerCAmelCase):
# TODO: how to do that better?
__lowercase =0
@property
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
__lowercase =OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
if self.use_past:
self.fill_with_past_key_values_(_lowerCAmelCase , direction='inputs')
__lowercase ={0: 'batch', 1: 'past_sequence + sequence'}
else:
__lowercase ={0: 'batch', 1: 'sequence'}
return common_inputs
@property
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
return self._config.n_layer
@property
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
return self._config.n_head
def __lowerCamelCase ( self : Optional[Any] , _lowerCAmelCase : PreTrainedTokenizer , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , ):
'''simple docstring'''
__lowercase =super(_lowerCAmelCase , self).generate_dummy_inputs(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase)
# We need to order the input in the way they appears in the forward()
__lowercase =OrderedDict({'input_ids': common_inputs['input_ids']})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
__lowercase , __lowercase =common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__lowercase =seqlen + 2
__lowercase =(
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__lowercase =[
(torch.zeros(_lowerCAmelCase), torch.zeros(_lowerCAmelCase)) for _ in range(self.num_layers)
]
__lowercase =common_inputs['attention_mask']
if self.use_past:
__lowercase =ordered_inputs['attention_mask'].dtype
__lowercase =torch.cat(
[ordered_inputs['attention_mask'], torch.ones(_lowerCAmelCase , _lowerCAmelCase , dtype=_lowerCAmelCase)] , dim=1)
return ordered_inputs
@property
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
return 1_3
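# Minimal usage sketch (checkpoint name is illustrative and requires hub access):
#   from transformers import AutoConfig
#   config = AutoConfig.from_pretrained("Salesforce/codegen-350M-mono")
#   onnx_config = CodeGenOnnxConfig(config)
#   list(onnx_config.inputs)        # -> ["input_ids", "attention_mask"]
#   onnx_config.default_onnx_opset  # -> 13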
| 48 | 1 |
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of the proper divisors of ``input_num``."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
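    # Illustrative spot-check: 28 is a perfect number, so the sum of its
    # proper divisors (1 + 2 + 4 + 7 + 14) equals 28.
    assert sum_of_divisors(28) == 28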
| 11 |
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P

# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qts) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
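# Minimal usage sketch (hypothetical parameter tree; the "mp" axis name must
# match the mesh axis used at pjit time). Every leaf has to match a rule, or
# the assert in set_partitions fires.
if __name__ == "__main__":
    import numpy as np

    fake_params = {
        "transformer": {
            "wte": {"embedding": np.zeros((8, 4))},
            "h": {"0": {"attention": {"out_proj": {"kernel": np.zeros((4, 4)), "bias": np.zeros(4)}}}},
        }
    }
    specs = set_partitions(fake_params)
    print(specs["transformer"]["wte"]["embedding"])  # PartitionSpec("mp", None)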
| 11 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 112 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def __lowerCAmelCase ( lowercase : Union[str, Any] , lowercase : Optional[int]=False ) -> Union[str, Any]:
"""simple docstring"""
snake_case : Dict = OmegaConf.load(lowercase )
if display:
print(yaml.dump(OmegaConf.to_container(lowercase ) ) )
return config
def __lowerCAmelCase ( lowercase : Dict , lowercase : Dict=None , lowercase : Dict=None ) -> Union[str, Any]:
"""simple docstring"""
if conf_path is None:
snake_case : Optional[Any] = "./model_checkpoints/vqgan_only.yaml"
snake_case : Union[str, Any] = load_config(lowercase , display=lowercase )
snake_case : List[Any] = VQModel(**config.model.params )
if ckpt_path is None:
snake_case : Optional[int] = "./model_checkpoints/vqgan_only.pt"
snake_case : Union[str, Any] = torch.load(lowercase , map_location=lowercase )
if ".ckpt" in ckpt_path:
snake_case : Union[str, Any] = sd["state_dict"]
model.load_state_dict(lowercase , strict=lowercase )
model.to(lowercase )
del sd
return model
def __lowerCAmelCase ( lowercase : str , lowercase : List[str] ) -> List[str]:
"""simple docstring"""
snake_case ,snake_case ,snake_case : List[Any] = model.encode(lowercase )
print(F'VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}' )
snake_case : Union[str, Any] = model.decode(lowercase )
return xrec
def __lowerCAmelCase ( lowercase : List[Any] , lowercase : str=False ) -> Optional[int]:
"""simple docstring"""
snake_case ,snake_case : Any = string.rsplit("." , 1 )
if reload:
snake_case : List[Any] = importlib.import_module(lowercase )
importlib.reload(lowercase )
return getattr(importlib.import_module(lowercase , package=lowercase ) , cls )
def __lowerCAmelCase ( lowercase : List[str] ) -> Union[str, Any]:
"""simple docstring"""
if "target" not in config:
raise KeyError("Expected key `target` to instantiate." )
return get_obj_from_str(config["target"] )(**config.get("params" , {} ) )
def __lowerCAmelCase ( lowercase : Tuple , lowercase : List[str] , lowercase : Tuple=True , lowercase : Optional[Any]=True ) -> Optional[int]:
"""simple docstring"""
snake_case : Optional[Any] = instantiate_from_config(lowercase )
if sd is not None:
model.load_state_dict(lowercase )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def __lowerCAmelCase ( lowercase : List[str] , lowercase : int , lowercase : Dict , lowercase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
if ckpt:
snake_case : Dict = torch.load(lowercase , map_location="cpu" )
snake_case : Any = pl_sd["global_step"]
print(F'loaded model from global step {global_step}.' )
else:
snake_case : Any = {"state_dict": None}
snake_case : List[str] = None
snake_case : Dict = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=lowercase , eval_mode=lowercase )["model"]
return model, global_step
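# Hypothetical smoke test: the default config/checkpoint paths baked into
# load_vqgan are placeholders and must exist locally for this to run.
if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    vqgan = load_vqgan(device)
    fake_batch = torch.randn(1, 3, 256, 256, device=device)
    print(reconstruct_with_vqgan(fake_batch, vqgan).shape)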
| 112 | 1 |
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class A__ ( nn.Module):
A_ : int
A_ : int
A_ : float = 0.0
A_ : int = 1
A_ : int = 1
A_ : bool = True
A_ : bool = False
A_ : bool = False
A_ : bool = False
A_ : jnp.dtype = jnp.floataa
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = []
__lowerCAmelCase : Any = []
for i in range(self.num_layers ):
__lowerCAmelCase : Tuple = self.in_channels if i == 0 else self.out_channels
__lowerCAmelCase : Any = FlaxResnetBlockaD(
in_channels=_SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = resnets
__lowerCAmelCase : Optional[int] = attentions
if self.add_downsample:
__lowerCAmelCase : str = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ):
__lowerCAmelCase : Tuple = ()
for resnet, attn in zip(self.resnets , self.attentions ):
__lowerCAmelCase : Tuple = resnet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , deterministic=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = attn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , deterministic=_SCREAMING_SNAKE_CASE )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCAmelCase : Tuple = self.downsamplers_a(_SCREAMING_SNAKE_CASE )
output_states += (hidden_states,)
return hidden_states, output_states
class A__ ( nn.Module):
A_ : int
A_ : int
A_ : float = 0.0
A_ : int = 1
A_ : bool = True
A_ : jnp.dtype = jnp.floataa
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = []
for i in range(self.num_layers ):
__lowerCAmelCase : Union[str, Any] = self.in_channels if i == 0 else self.out_channels
__lowerCAmelCase : str = FlaxResnetBlockaD(
in_channels=_SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = resnets
if self.add_downsample:
__lowerCAmelCase : Optional[Any] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ):
__lowerCAmelCase : List[str] = ()
for resnet in self.resnets:
__lowerCAmelCase : List[Any] = resnet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , deterministic=_SCREAMING_SNAKE_CASE )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCAmelCase : Union[str, Any] = self.downsamplers_a(_SCREAMING_SNAKE_CASE )
output_states += (hidden_states,)
return hidden_states, output_states
class A__ ( nn.Module):
A_ : int
A_ : int
A_ : int
A_ : float = 0.0
A_ : int = 1
A_ : int = 1
A_ : bool = True
A_ : bool = False
A_ : bool = False
A_ : bool = False
A_ : jnp.dtype = jnp.floataa
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = []
__lowerCAmelCase : Union[str, Any] = []
for i in range(self.num_layers ):
__lowerCAmelCase : Optional[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCAmelCase : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels
__lowerCAmelCase : List[str] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = resnets
__lowerCAmelCase : Optional[Any] = attentions
if self.add_upsample:
__lowerCAmelCase : List[str] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ):
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
__lowerCAmelCase : Union[str, Any] = res_hidden_states_tuple[-1]
__lowerCAmelCase : List[str] = res_hidden_states_tuple[:-1]
__lowerCAmelCase : str = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__lowerCAmelCase : Union[str, Any] = resnet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , deterministic=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = attn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , deterministic=_SCREAMING_SNAKE_CASE )
if self.add_upsample:
__lowerCAmelCase : Optional[Any] = self.upsamplers_a(_SCREAMING_SNAKE_CASE )
return hidden_states
class A__ ( nn.Module):
A_ : int
A_ : int
A_ : int
A_ : float = 0.0
A_ : int = 1
A_ : bool = True
A_ : jnp.dtype = jnp.floataa
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = []
for i in range(self.num_layers ):
__lowerCAmelCase : int = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCAmelCase : List[Any] = self.prev_output_channel if i == 0 else self.out_channels
__lowerCAmelCase : Union[str, Any] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = resnets
if self.add_upsample:
__lowerCAmelCase : Any = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ):
for resnet in self.resnets:
# pop res hidden states
__lowerCAmelCase : Any = res_hidden_states_tuple[-1]
__lowerCAmelCase : List[Any] = res_hidden_states_tuple[:-1]
__lowerCAmelCase : Optional[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__lowerCAmelCase : List[Any] = resnet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , deterministic=_SCREAMING_SNAKE_CASE )
if self.add_upsample:
__lowerCAmelCase : Dict = self.upsamplers_a(_SCREAMING_SNAKE_CASE )
return hidden_states
class A__ ( nn.Module):
A_ : int
A_ : float = 0.0
A_ : int = 1
A_ : int = 1
A_ : bool = False
A_ : bool = False
A_ : jnp.dtype = jnp.floataa
def __lowerCamelCase ( self ):
# there is always at least one resnet
__lowerCAmelCase : str = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
__lowerCAmelCase : List[Any] = []
for _ in range(self.num_layers ):
__lowerCAmelCase : List[str] = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = resnets
__lowerCAmelCase : Union[str, Any] = attentions
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ):
__lowerCAmelCase : int = self.resnets[0](_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
__lowerCAmelCase : int = attn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , deterministic=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = resnet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , deterministic=_SCREAMING_SNAKE_CASE )
return hidden_states
| 86 |
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1_000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
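    # CLRS worked example (verified by hand): the maximum subarray of this
    # classic 16-element instance spans indices 7..10 with sum 43.
    example = [13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12, -5, -22, 15, -4, 7]
    print(max_subarray(example, 0, len(example) - 1))  # -> (7, 10, 43)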
| 157 | 0 |
from math import log

from scipy.constants import Boltzmann, physical_constants

T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
"""simple docstring"""
if donor_conc <= 0:
raise ValueError('''Donor concentration should be positive''' )
elif acceptor_conc <= 0:
raise ValueError('''Acceptor concentration should be positive''' )
elif intrinsic_conc <= 0:
raise ValueError('''Intrinsic concentration should be positive''' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'''Donor concentration should be greater than intrinsic concentration''' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'''Acceptor concentration should be greater than intrinsic concentration''' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
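    # Worked example (illustrative silicon doping levels, in cm^-3): with
    # N_D = N_A = 1e17 and n_i = 1.5e10, V_bi = (kT/q) * ln(N_D * N_A / n_i**2),
    # which comes out to roughly 0.81 V at T = 300 K.
    print(builtin_voltage(1e17, 1e17, 1.5e10))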
| 364 |
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
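    # Hand-verified sanity checks before timing: the default sentence is a
    # pangram; "hello world" is not.
    assert is_pangram() and is_pangram_faster() and is_pangram_fastest()
    assert not is_pangram("hello world")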
benchmark()
| 315 | 0 |
TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])

IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])

IMAGE_VARIATION_PARAMS = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])

TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])

TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])

IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])

IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])

CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])

CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])

UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])

UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])

TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])

TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
| 257 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    """Wraps a Speech2Text feature extractor and a Speech2Text tokenizer into a single processor."""

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call)."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
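# Usage sketch (checkpoint name is an example; `waveform` is a 1-D float array):
#   processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#   inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text="a reference transcript", return_tensors="pt").input_ids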
| 257 | 1 |
"""Project Euler problem 11: greatest product of four adjacent numbers in a 20x20 grid."""
import os


def solution():
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum


if __name__ == "__main__":
    print(solution())
| 322 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
def __init__( self : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str]=13 , lowerCamelCase__ : Optional[Any]=7 , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Any=True , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Any=True , lowerCamelCase__ : int=99 , lowerCamelCase__ : int=32 , lowerCamelCase__ : List[str]=5 , lowerCamelCase__ : Optional[Any]=4 , lowerCamelCase__ : Optional[int]=37 , lowerCamelCase__ : Tuple="gelu" , lowerCamelCase__ : Any=0.1 , lowerCamelCase__ : Union[str, Any]=0.1 , lowerCamelCase__ : Optional[int]=5_12 , lowerCamelCase__ : Optional[int]=16 , lowerCamelCase__ : str=2 , lowerCamelCase__ : Union[str, Any]=0.0_2 , lowerCamelCase__ : Tuple=4 , ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = parent
_UpperCAmelCase : List[Any] = batch_size
_UpperCAmelCase : Optional[int] = seq_length
_UpperCAmelCase : int = is_training
_UpperCAmelCase : Dict = use_attention_mask
_UpperCAmelCase : Optional[Any] = use_token_type_ids
_UpperCAmelCase : int = use_labels
_UpperCAmelCase : Optional[int] = vocab_size
_UpperCAmelCase : Any = hidden_size
_UpperCAmelCase : Any = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : Tuple = intermediate_size
_UpperCAmelCase : int = hidden_act
_UpperCAmelCase : int = hidden_dropout_prob
_UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_UpperCAmelCase : Union[str, Any] = max_position_embeddings
_UpperCAmelCase : Tuple = type_vocab_size
_UpperCAmelCase : List[Any] = type_sequence_label_size
_UpperCAmelCase : Optional[int] = initializer_range
_UpperCAmelCase : Dict = num_choices
def lowerCAmelCase__ ( self : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : Dict = None
if self.use_attention_mask:
_UpperCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : Union[str, Any] = None
if self.use_token_type_ids:
_UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase : int = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[Any] = config_and_inputs
_UpperCAmelCase : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int = FlaxAlbertModelTester(self )
@slow
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCAmelCase : List[str] = model_class_name.from_pretrained("albert-base-v2" )
_UpperCAmelCase : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
@slow
def lowerCAmelCase__ ( self : Tuple ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : str = FlaxAlbertModel.from_pretrained("albert-base-v2" )
_UpperCAmelCase : List[Any] = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
_UpperCAmelCase : Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_UpperCAmelCase : Dict = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )[0]
_UpperCAmelCase : List[Any] = (1, 11, 7_68)
self.assertEqual(output.shape , lowerCamelCase__ )
_UpperCAmelCase : str = np.array(
[[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCamelCase__ , atol=1E-4 ) )
| 322 | 1 |
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    """Feature extraction pipeline using no model head: returns the raw hidden states for the inputs."""

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
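# Usage sketch (checkpoint name is only an example):
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   features = extractor("This is a test.")
#   len(features[0])     # number of tokens
#   len(features[0][0])  # hidden size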
| 48 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 48 | 1 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 64 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 64 | 1 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector: list[float]):
    """Apply the rectified linear unit elementwise: max(0, x)."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 112 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 112 | 1 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
class UpperCAmelCase ( tf.Module ):
'''simple docstring'''
def __init__( self , lowercase ):
"""simple docstring"""
super().__init__()
A_ : Any = tokenizer
A_ : Optional[Any] = AutoConfig.from_pretrained(lowercase )
A_ : int = TFGPTaLMHeadModel.from_config(lowercase )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : List[str] = self.tokenizer(lowercase )
A_ : Tuple = tokenized['input_ids'].to_tensor()
A_ : Optional[Any] = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
A_ : List[Any] = self.model(input_ids=lowercase , attention_mask=lowercase )['logits']
return outputs
@require_tf
@require_keras_nlp
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().setUp()
A_ : Any = [GPTaTokenizer.from_pretrained(lowercase ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
A_ : Any = [TFGPTaTokenizer.from_pretrained(lowercase ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
A_ : Union[str, Any] = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
A_ : Tuple = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
A_ : List[Any] = tokenizer([test_inputs] , return_tensors='tf' )
A_ : str = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
A_ : Optional[int] = python_outputs[key].numpy()
A_ : Optional[int] = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(lowercase , tf.intaa ) == tf_outputs_values ) )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
A_ : Any = tf.function(lowercase )
for test_inputs in self.test_sentences:
A_ : Optional[int] = tf.constant(lowercase )
A_ : Tuple = compiled_tokenizer(lowercase )
A_ : Tuple = tf_tokenizer(lowercase )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
A_ : Any = ModelToSave(tokenizer=lowercase )
A_ : int = tf.convert_to_tensor([self.test_sentences[0]] )
A_ : Dict = model.serving(lowercase ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
A_ : Dict = Path(lowercase ) / 'saved.model'
tf.saved_model.save(lowercase , lowercase , signatures={'serving_default': model.serving} )
A_ : List[Any] = tf.saved_model.load(lowercase )
A_ : List[str] = loaded_model.signatures['serving_default'](lowercase )['output_0']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
A_ : Dict = tf.convert_to_tensor([self.test_sentences[0]] )
A_ : List[str] = tf_tokenizer(lowercase ) # Build model with some sample inputs
A_ : Any = tf_tokenizer.get_config()
A_ : str = TFGPTaTokenizer.from_config(lowercase )
A_ : Any = model_from_config(lowercase )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
A_ : Dict = 1_2_3_1_2_3
for max_length in [3, 5, 1_0_2_4]:
A_ : Optional[int] = tf.convert_to_tensor([self.test_sentences[0]] )
A_ : Any = tf_tokenizer(lowercase , max_length=lowercase )
A_ : Union[str, Any] = out['input_ids'].numpy().shape[1]
assert out_length == max_length
| 363 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
```
"""
@dataclass
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = 42
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , ):
"""simple docstring"""
super().__init__()
self.register_modules(
prior=lowercase , image_encoder=lowercase , image_processor=lowercase , scheduler=lowercase , renderer=lowercase , )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
"""simple docstring"""
if latents is None:
A_ : Optional[Any] = randn_tensor(lowercase , generator=lowercase , device=lowercase , dtype=lowercase )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
A_ : Optional[int] = latents.to(lowercase )
A_ : List[Any] = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase_ ( self , lowercase=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
A_ : Tuple = torch.device(F'''cuda:{gpu_id}''' )
A_ : Dict = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase , lowercase )
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(lowercase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase , ):
"""simple docstring"""
if isinstance(lowercase , lowercase ) and isinstance(image[0] , torch.Tensor ):
A_ : Tuple = torch.cat(lowercase , axis=0 ) if image[0].ndim == 4 else torch.stack(lowercase , axis=0 )
if not isinstance(lowercase , torch.Tensor ):
A_ : Dict = self.image_processor(lowercase , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
A_ : List[str] = image.to(dtype=self.image_encoder.dtype , device=lowercase )
A_ : Tuple = self.image_encoder(lowercase )['last_hidden_state']
A_ : Dict = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
A_ : List[str] = image_embeds.repeat_interleave(lowercase , dim=0 )
if do_classifier_free_guidance:
A_ : str = torch.zeros_like(lowercase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__( self , lowercase , lowercase = 1 , lowercase = 2_5 , lowercase = None , lowercase = None , lowercase = 4.0 , lowercase = 6_4 , lowercase = "pil" , lowercase = True , ):
"""simple docstring"""
if isinstance(lowercase , PIL.Image.Image ):
A_ : int = 1
elif isinstance(lowercase , torch.Tensor ):
A_ : int = image.shape[0]
elif isinstance(lowercase , lowercase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
A_ : List[str] = len(lowercase )
else:
raise ValueError(
F'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(lowercase )}''' )
A_ : Any = self._execution_device
A_ : List[Any] = batch_size * num_images_per_prompt
A_ : int = guidance_scale > 1.0
A_ : Optional[int] = self._encode_image(lowercase , lowercase , lowercase , lowercase )
# prior
self.scheduler.set_timesteps(lowercase , device=lowercase )
A_ : Dict = self.scheduler.timesteps
A_ : int = self.prior.config.num_embeddings
A_ : int = self.prior.config.embedding_dim
A_ : Dict = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , lowercase , lowercase , lowercase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
A_ : Union[str, Any] = latents.reshape(latents.shape[0] , lowercase , lowercase )
for i, t in enumerate(self.progress_bar(lowercase ) ):
# expand the latents if we are doing classifier free guidance
A_ : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A_ : List[Any] = self.scheduler.scale_model_input(lowercase , lowercase )
A_ : Any = self.prior(
lowercase , timestep=lowercase , proj_embedding=lowercase , ).predicted_image_embedding
# remove the variance
A_ , A_ : int = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance is not None:
A_ , A_ : List[Any] = noise_pred.chunk(2 )
A_ : str = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
A_ : Optional[int] = self.scheduler.step(
lowercase , timestep=lowercase , sample=lowercase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=lowercase )
A_ : str = []
for i, latent in enumerate(lowercase ):
print()
A_ : Optional[Any] = self.renderer.decode(
latent[None, :] , lowercase , size=lowercase , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , )
images.append(lowercase )
A_ : Dict = torch.stack(lowercase )
if output_type not in ["np", "pil"]:
raise ValueError(F'''Only the output types `pil` and `np` are supported not output_type={output_type}''' )
A_ : Dict = images.cpu().numpy()
if output_type == "pil":
A_ : str = [self.numpy_to_pil(lowercase ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=lowercase )
| 192 | 0 |
"""simple docstring"""
from manim import *
class Stage5(Scene):  # manim Scene; the "Stage5" name is an assumption matching the sibling big-model-inference animations
    def construct(self):
"""simple docstring"""
_lowerCamelCase : List[Any] = Rectangle(height=0.5 , width=0.5 )
_lowerCamelCase : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_lowerCamelCase : List[Any] = Rectangle(height=0.25 , width=0.25 )
_lowerCamelCase : int = [mem.copy() for i in range(6 )]
_lowerCamelCase : List[str] = [mem.copy() for i in range(6 )]
_lowerCamelCase : Any = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
_lowerCamelCase : Dict = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
_lowerCamelCase : Any = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
_lowerCamelCase : List[str] = Text('''CPU''' , font_size=2_4 )
_lowerCamelCase : Any = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = [mem.copy() for i in range(4 )]
_lowerCamelCase : List[str] = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
_lowerCamelCase : List[str] = Text('''GPU''' , font_size=2_4 )
_lowerCamelCase : Optional[int] = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(__lowerCAmelCase )
_lowerCamelCase : List[str] = [mem.copy() for i in range(6 )]
_lowerCamelCase : Optional[int] = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
_lowerCamelCase : Optional[Any] = Text('''Model''' , font_size=2_4 )
_lowerCamelCase : Tuple = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(__lowerCAmelCase )
_lowerCamelCase : List[str] = []
_lowerCamelCase : List[str] = []
for i, rect in enumerate(__lowerCAmelCase ):
_lowerCamelCase : List[str] = fill.copy().set_fill(__lowerCAmelCase , opacity=0.8 )
target.move_to(__lowerCAmelCase )
model_arr.append(__lowerCAmelCase )
_lowerCamelCase : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__lowerCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__lowerCAmelCase )
self.add(*__lowerCAmelCase , *__lowerCAmelCase )
_lowerCamelCase : Dict = [meta_mem.copy() for i in range(6 )]
_lowerCamelCase : Dict = [meta_mem.copy() for i in range(6 )]
_lowerCamelCase : Optional[Any] = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
_lowerCamelCase : Optional[int] = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
_lowerCamelCase : Optional[Any] = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
_lowerCamelCase : str = Text('''Disk''' , font_size=2_4 )
_lowerCamelCase : List[Any] = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
disk.move_to([-4, -1.25, 0] )
self.add(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowerCamelCase : List[Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : List[Any] = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=1_8 , )
blue_text.next_to(__lowerCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__lowerCAmelCase )
_lowerCamelCase : Tuple = MarkupText(
f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCAmelCase ) )
_lowerCamelCase : Union[str, Any] = Square(0.3 )
input.set_fill(__lowerCAmelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , __lowerCAmelCase , buff=0.5 )
self.play(Write(__lowerCAmelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=__lowerCAmelCase , buff=0.02 )
self.play(MoveToTarget(__lowerCAmelCase ) )
self.play(FadeOut(__lowerCAmelCase ) )
_lowerCamelCase : List[Any] = Arrow(start=__lowerCAmelCase , end=__lowerCAmelCase , color=__lowerCAmelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , __lowerCAmelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_lowerCamelCase : Any = MarkupText(
f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCAmelCase , run_time=3 ) )
_lowerCamelCase : List[str] = {'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.02}
self.play(
Write(__lowerCAmelCase ) , Circumscribe(model_arr[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
_lowerCamelCase : Optional[int] = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , __lowerCAmelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_lowerCamelCase : int = AnimationGroup(
FadeOut(__lowerCAmelCase , run_time=0.5 ) , MoveToTarget(__lowerCAmelCase , run_time=0.5 ) , FadeIn(__lowerCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(__lowerCAmelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_lowerCamelCase : Dict = 0.7
self.play(
Circumscribe(model_arr[i] , **__lowerCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **__lowerCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(model_arr[i + 1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
_lowerCamelCase : Dict = a_c
_lowerCamelCase : List[str] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(__lowerCAmelCase ) , FadeOut(__lowerCAmelCase , run_time=0.5 ) , )
_lowerCamelCase : int = MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCAmelCase , run_time=3 ) , MoveToTarget(__lowerCAmelCase ) )
self.wait()
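# Hedged usage note: a scene like the one above is rendered with Manim Community from
# the command line; the module filename below is a placeholder.
#
#   manim -pql big_model_inference.py BigModelInference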
| 72 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
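# Hedged usage sketch (illustrative hyperparameters):
if __name__ == "__main__":
    config = XLNetConfig(vocab_size=32000, d_model=1024, n_layer=24, n_head=16)
    print(config.hidden_size)  # attribute_map resolves this to d_model -> 1024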
| 315 | 0 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def _lowercase ( __lowerCAmelCase ) -> int:
SCREAMING_SNAKE_CASE__ : Optional[int] = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def _lowercase ( __lowerCAmelCase ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : Any = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
SCREAMING_SNAKE_CASE__ : Optional[int] = s_dict.pop(__lowerCAmelCase )
elif "subsample" in key:
SCREAMING_SNAKE_CASE__ : Dict = s_dict.pop(__lowerCAmelCase )
def _lowercase ( __lowerCAmelCase ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : int = emb.weight.shape
SCREAMING_SNAKE_CASE__ : List[str] = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : int = emb.weight.data
return lin_layer
def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
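# Hedged CLI example (the script filename and paths are placeholders):
#
#   python convert_s2t_fairseq_to_tfms.py \
#       --fairseq_path /path/to/s2t_checkpoint.pt \
#       --pytorch_dump_folder_path ./s2t-converted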
| 364 |
"""simple docstring"""
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> Any:
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
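# Hedged round-trip sketch mirroring the tests above; "data.db" and the table name
# "dataset" are illustrative.
if __name__ == "__main__":
    ds = Dataset.from_dict({"col_1": ["a", "b", "c", "d"], "col_2": [1, 2, 3, 4], "col_3": [1.0, 2.0, 3.0, 4.0]})
    SqlDatasetWriter(ds, "dataset", "sqlite:///data.db", num_proc=1).write()
    loaded = SqlDatasetReader("dataset", "sqlite:///data.db").read()
    print(loaded.column_names)  # ['col_1', 'col_2', 'col_3']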
| 56 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class CustomImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PIL.Image.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
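# Hedged usage sketch of the preprocessing pipeline above; the random array stands in
# for a real image, and the class name is the placeholder chosen in this file.
if __name__ == "__main__":
    processor = CustomImageProcessor()
    dummy_image = (np.random.rand(300, 300, 3) * 255).astype("uint8")
    batch = processor(images=dummy_image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224): resize to 256, then center crop to 224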
| 322 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
    def create_and_check_xlm_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions],
                [expected_shape] * len(iter_attentions),
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
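# Hedged note: with the transformers repository checked out, these tests are typically
# run via pytest (the file path below is an assumption):
#
#   RUN_SLOW=1 pytest tests/models/xlm/test_modeling_xlm.py -k "generate"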
| 322 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
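# Hedged usage note: thanks to the lazy module above, the heavy torch/vision imports
# only run when a symbol is first touched, e.g. (assuming transformers is installed):
#
#   from transformers import EfficientNetConfig, EfficientNetModel
#   model = EfficientNetModel(EfficientNetConfig())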
| 63 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}


class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
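# Hedged usage sketch (illustrative values, not recommended settings):
if __name__ == "__main__":
    config = DetaConfig(num_queries=300)
    # attribute_map routes these reads to encoder_attention_heads and d_model
    print(config.num_attention_heads, config.hidden_size)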
| 63 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipeline_cycle_diffusion import CycleDiffusionPipeline
    from .pipeline_stable_diffusion import StableDiffusionPipeline
    from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
    from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
    from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
    from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
    from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
    from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
    from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
    from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
    from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
    from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
    from .safety_checker import StableDiffusionSafetyChecker
    from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.26.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('''>=''', '''0.0.12''')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
    from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
    from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
    from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
    from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        images: np.ndarray
        nsfw_content_detected: List[bool]

    from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
    from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
    from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 64 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 64 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 371 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")
def lowercase ( self : List[str] ):
_snake_case = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , _lowerCamelCase )
self.assertIn('''torch_and_transformers''' , _lowerCamelCase )
self.assertIn('''flax_and_transformers''' , _lowerCamelCase )
self.assertIn('''torch_and_transformers_and_onnx''' , _lowerCamelCase )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''UNet2DModel''' , objects['''torch'''] )
self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] )
self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] )
self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] )
self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] )
self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] )
def lowercase ( self : List[str] ):
_snake_case = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(_lowerCamelCase , '''\nCONSTANT = None\n''' )
_snake_case = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
_lowerCamelCase , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
_snake_case = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
_snake_case = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def lowercase ( self : str ):
_snake_case = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
'''
_snake_case = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , _lowerCamelCase )
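    # Minimal sketch (illustrative) of what a generated dummy stands in for at
    # runtime: without the backend installed, touching the class raises an
    # informative ImportError through `requires_backends`.
    #
    #     from diffusers.utils import DummyObject, requires_backends
    #
    #     class FakeClass(metaclass=DummyObject):
    #         _backends = ["torch"]
    #
    #         def __init__(self, *args, **kwargs):
    #             requires_backends(self, ["torch"])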
| 40 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 315 |
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
'n_samples': 64,
'horizon': 32,
'num_inference_steps': 20,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
# execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
f''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
            obs = next_observation
except KeyboardInterrupt:
pass
print(f'''Total reward: {total_reward}''')
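    # Optional follow-up (illustrative, not part of the original script): persist
    # the saved observations for offline inspection or rendering.
    #
    #     import numpy as np
    #     np.save("hopper_rollout.npy", np.stack(rollout))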
| 192 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = [torch.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size)
            )


@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = [tf.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )


@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )

        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
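# Quick usage sketch (illustrative; assumes network access and the public
# "facebook/sam-vit-base" checkpoint):
#
#     processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
#     inputs = processor(images=pil_image, input_points=[[[450, 600]]], return_tensors="pt")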
| 369 |
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]
def roman_to_int(roman: str) -> int:
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
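    # Round-trip spot checks (illustrative, using the function names fixed above):
    assert roman_to_int("MMXXIV") == 2024
    assert int_to_roman(2024) == "MMXXIV"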
| 257 | 0 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
| 283 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]


def random_chars(number_char: int) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('DONE ✅')
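# Note on the annotation format (for orientation, inferred from the code above):
# each line of a label file is "class x_center y_center width height" with values
# normalized to [0, 1]; get_dataset converts centers/sizes to corner coordinates,
# and main() converts them back before writing the mosaic's label file.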
| 56 | 0 |
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
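    # All-negative input shows what allow_empty_subarrays changes (illustrative):
    assert max_subarray_sum([-3, -1, -2]) == -1  # best non-empty subarray
    assert max_subarray_sum([-3, -1, -2], allow_empty_subarrays=True) == 0  # empty subarray allowed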
| 370 |
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [[-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1

        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0

        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't use this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue

                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util

        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
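# Worked example (illustrative): for the inputs above the printed count is 10.
# Bits of `mask` index persons, tasks are walked 1..N, and the memoized recursion
# counts each conflict-free complete assignment exactly once -- e.g. with person 0
# on task 4, persons 1 and 2 still have 1/2/5 and 3 to split between them.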
| 82 | 0 |
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_a = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_19_70, 13, 5, 60_92, 1_67, 28, 71_03, 21_53, 6_73, 8, 70_28, 1_20_51, 18, 17, 71_03, 21_53, 6_73, 8, 35_15, 1_86_84, 8, 44_61, 6, 19_27, 2_97, 8, 1_20_60, 26_07, 18, 13, 5, 44_61, 15, 1_05_38, 38, 8, 1_35, 15, 8_22, 58, 15, 9_93, 1_03_63, 15, 14_60, 80_05, 44_61, 15, 9_93, 2_55, 23_28, 9, 9, 9, 6, 26, 11_12, 8_16, 32_60, 13, 5, 1_03, 23_77, 6, 17, 11_12, 8_16, 27_82, 13, 5, 1_03, 1_06_41, 6, 29, 84, 25_12, 24_30, 7_82, 1_86_84, 27_61, 19, 8_08, 24_30, 25_56, 17, 8_55, 14_80, 94_77, 40_91, 1_28, 1_17_12, 15, 71_03, 21_53, 6_73, 17, 2_48_83, 99_90, 9, 3], [2, 1_15_02, 25, 10_06, 20, 7_82, 8, 1_18_09, 8_55, 17_32, 1_93_93, 1_86_67, 37, 3_67, 2_10_18, 69, 18_54, 34, 1_18_60, 1_91_24, 27, 1_56, 2_25, 17, 1_93, 41_41, 19, 65, 91_24, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 22_31, 8_86, 23_85, 1_76_59, 84, 14, 1_67_92, 19_52, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=_a,  # the expected-encoding dict defined just above (name kept from the source)
            model_name="albert-base-v2",
            revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
| 63 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
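    # Usage sketch (illustrative; downloads the facebook/bart-large-mnli checkpoint
    # on first use):
    #
    #     classifier = TextClassificationTool()
    #     classifier("This is a super nice API!", labels=["positive", "negative"])
    #     # -> "positive"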
| 352 |
def twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
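    # Spot checks (illustrative):
    assert twos_complement(0) == "0b0"
    assert twos_complement(-1) == "0b11"
    assert twos_complement(-5) == "0b1011"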
| 279 | 0 |
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(f"{solution() = }")
| 87 |
"""simple docstring"""
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check the Project Euler 43 substring-divisibility property for a tuple of digits."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum of all 0-to-9 pandigital numbers with the substring divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(f'''{solution() = }''')
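# Spot check from the Project Euler 43 statement (illustrative):
# 1406357289 is 0-to-9 pandigital and has the substring divisibility property.
assert is_substring_divisible(tuple(int(c) for c in "1406357289"))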
| 40 | 0 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)

        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values

        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values

        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values

        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values

        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
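# Shape note (for orientation): TVLT audio features are laid out as
# (batch, num_audio_channels, time_frames, feature_size), hence the
# (1, 1, 192, 128) assertion above for a single mono clip with 128 mel bins.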
| 195 |
"""simple docstring"""
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 195 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-sat-base-100h-libri-ft": (
        "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}


class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
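if __name__ == "__main__":
    # Illustrative sanity check (not part of the original file): the default
    # conv_stride (5, 2, 2, 2, 2, 2, 2) downsamples by 5 * 2**6 == 320, i.e. one
    # logit frame per 320 waveform samples (~20 ms at a 16 kHz sampling rate).
    config = UniSpeechSatConfig()
    assert config.inputs_to_logits_ratio == 320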
| 21 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
lowerCAmelCase__ : Optional[int] =logger = getLogger(__name__)
DEFAULT_DEVICE = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def generate_summaries_or_translations( examples , out_file , model_name , batch_size = 8 , device = DEFAULT_DEVICE , fpaa=False , task="summarization" , prefix=None , **generate_kwargs , ) -> Dict:
    fout = Path(out_file ).open('w' , encoding='utf-8' )
    model_name = str(model_name )
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name ).to(device )
    if fpaa:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model , task )
    if prefix is None:
        prefix = prefix or getattr(model.config , 'prefix' , '' ) or ''
    for examples_chunk in tqdm(list(chunks(examples , batch_size ) ) ):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk , return_tensors='pt' , truncation=True , padding='longest' ).to(device )
        summaries = model.generate(
            input_ids=batch.input_ids , attention_mask=batch.attention_mask , **generate_kwargs , )
        dec = tokenizer.batch_decode(summaries , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        for hypothesis in dec:
            fout.write(hypothesis + '\n' )
        fout.flush()
    fout.close()
    runtime = int(time.time() - start_time ) # seconds
    n_obs = len(examples )
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def datetime_now( ) -> Any:
return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )
def run_generate( verbose=True ) -> int:
    parser = argparse.ArgumentParser()
parser.add_argument('model_name' , type=a__ , help='like facebook/bart-large-cnn,t5-base, etc.' )
parser.add_argument('input_path' , type=a__ , help='like cnn_dm/test.source' )
parser.add_argument('save_path' , type=a__ , help='where to save summaries' )
parser.add_argument('--reference_path' , type=a__ , required=a__ , help='like cnn_dm/test.target' )
parser.add_argument('--score_path' , type=a__ , required=a__ , default='metrics.json' , help='where to save metrics' )
parser.add_argument('--device' , type=a__ , required=a__ , default=a__ , help='cuda, cuda:1, cpu etc.' )
parser.add_argument(
        '--prefix' , type=a__ , required=a__ , default=a__ , help='will be added to the beginning of src examples' )
parser.add_argument('--task' , type=a__ , default='summarization' , help='used for task_specific_params + metrics' )
parser.add_argument('--bs' , type=a__ , default=8 , required=a__ , help='batch size' )
parser.add_argument(
'--n_obs' , type=a__ , default=-1 , required=a__ , help='How many observations. Defaults to all.' )
parser.add_argument('--fp16' , action='store_true' )
parser.add_argument('--dump-args' , action='store_true' , help='print the custom hparams with the results' )
parser.add_argument(
'--info' , nargs='?' , type=a__ , const=datetime_now() , help=(
'use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'
' lang=en-ru. If no value is passed, the current datetime string will be used.'
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest )
if parsed_args and verbose:
print(f"""parsed the following generate kwargs: {parsed_args}""" )
    examples = [' ' + x.rstrip() if 't5' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path ).parent.mkdir(exist_ok=True )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(f"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('Can\'t mix --fp16 and --device cpu' )
    runtime_metrics = generate_summaries_or_translations(
        examples , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **parsed_args , )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if 'translation' in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path ).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(output_lns )]
    scores = score_fn(output_lns , reference_lns )
    scores.update(runtime_metrics )
    if args.dump_args:
        scores.update(parsed_args )
    if args.info:
        scores['info'] = args.info
    if verbose:
        print(scores )
    if args.score_path is not None:
        json.dump(scores , open(args.score_path , 'w' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
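    # Illustrative usage for summarization (flags as defined by the parser above; paths are placeholders):
    # python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_summaries.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_rouge.json --task summarization --bs 16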
run_generate(verbose=True)
| 257 | 0 |
def snake_case_ ( x_points : list , y_points : list , xa : int ):
    n = len(x_points )
    q = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]
    for i in range(2 , n ):
        for j in range(i , n ):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
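# Illustrative check (not part of the original file): four points of f(x) = x**2
# determine that polynomial exactly, so interpolating y = [1, 4, 9, 16] over
# x = [1, 2, 3, 4] at xa = 2.5 should return 6.25 as the first element:
#     snake_case_([1, 2, 3, 4], [1, 4, 9, 16], 2.5)[0] == 6.25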
| 306 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
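# Note (added): entries in MAPPING whose value contains "*" are per-layer parameters;
# the loader below parses the layer index out of the fairseq weight name and
# substitutes it for "*" before resolving the attribute path on the Hugging Face model.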
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            F" {value.shape} for {full_name}" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights( fairseq_model , hf_model , is_headless ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index )
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = """weight"""
                    elif "running_mean" in name:
                        weight_type = """running_mean"""
                    elif "inv_freq" in name:
                        weight_type = """inv_freq"""
                    elif "running_var" in name:
                        weight_type = """running_var"""
                    elif "num_batches_tracked" in name:
                        weight_type = """num_batches_tracked"""
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                    continue
            if not is_used:
                unused_weights.append(name )
    logger.warning(F"Unused weights: {unused_weights}" )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path , hidden_act="""swish""" )
    else:
        config = WavaVecaConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = """rotary"""
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , """vocab.json""" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path , """w""" , encoding="""utf-8""" ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == """layer""" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = WavaVecaConformerForCTC(config )
    else:
        hf_wavavec = WavaVecaConformerForPreTraining(config )
    if is_finetuned:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task="""audio_pretraining""" )
        task = fairseq.tasks.setup_task(task_arg )
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , not is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
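    # Illustrative invocation (paths are placeholders; the script filename is assumed):
    # python convert_wav2vec2_conformer_original_pytorch_checkpoint_to_pytorch.py \
    #     --checkpoint_path /path/to/fairseq_checkpoint.pt --pytorch_dump_folder_path ./wav2vec2-conformer \
    #     --config_path ./config.json --dict_path /path/to/dict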
| 306 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class DownloadCommand( BaseTransformersCLICommand ):
'''simple docstring'''
@staticmethod
    def register_subcommand( parser : ArgumentParser ):
        """simple docstring"""
        download_parser = parser.add_parser("""download""" )
download_parser.add_argument(
"""--cache-dir""" , type=_snake_case , default=_snake_case , help="""Path to location to store the models""" )
download_parser.add_argument(
"""--force""" , action="""store_true""" , help="""Force the model to be download even if already in cache-dir""" )
download_parser.add_argument(
"""--trust-remote-code""" , action="""store_true""" , help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" , )
download_parser.add_argument("""model""" , type=_snake_case , help="""Name of the model to download""" )
download_parser.set_defaults(func=_snake_case )
    def __init__( self : Any , model : str , cache : str , force : bool , trust_remote_code : bool ):
        """simple docstring"""
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run( self : Dict ):
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
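# Illustrative usage via the transformers CLI (model name is only an example):
#     transformers-cli download bert-base-uncased --cache-dir ./models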
| 346 |
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort( graph , vert , visited ):
    """simple docstring"""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph , neighbour , visited )
    order.append(vert )
    return order
def find_components( reversed_graph , vert , visited ):
    """simple docstring"""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph , neighbour , visited )
    return component
def strongly_connected_components( graph ):
    """simple docstring"""
    visited = len(graph ) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph ) )}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert )
    order = []
    for i, was_visited in enumerate(visited ):
        if not was_visited:
            order += topology_sort(graph , i , visited )
    components_list = []
    visited = len(graph ) * [False]
    for i in range(len(graph ) ):
        vert = order[len(graph ) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph , vert , visited )
            components_list.append(component )
    return components_list
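# Illustrative run (under the reconstruction above) on the first sample graph:
# vertices {0, 1, 2} form a cycle (0 -> 2 -> 1 -> 0), while 3 and 4 are singleton
# components, so:
#     strongly_connected_components(test_graph_1) == [[0, 1, 2], [3], [4]]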
| 82 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
_lowercase : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
_lowercase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_lowercase : Optional[str] = field(
default='''NER''' , metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} )
_lowercase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_lowercase : bool = field(default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_lowercase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class DataTrainingArguments:
_lowercase : str = field(
metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} )
_lowercase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''} , )
_lowercase : int = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_lowercase : bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def main( ):
"""simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''' )
    module = import_module('''tasks''' )
    try:
        token_classification_task_clazz = getattr(module , model_args.task_type )
        token_classification_task = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '
f'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels )
    label_map = dict(enumerate(labels ) )
    num_labels = len(labels )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label=label_map , label2id={label: i for i, label in enumerate(labels )} , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions , label_ids ) -> Tuple[List[int], List[int]]:
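        # Note (added): positions whose label equals nn.CrossEntropyLoss().ignore_index
        # (-100 by default in PyTorch) are padding / special-token positions and are
        # skipped below when aligning predictions with labels.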
        preds = np.argmax(predictions , axis=2 )
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size )]
        preds_list = [[] for _ in range(batch_size )]
        for i in range(batch_size ):
            for j in range(seq_len ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
    def compute_metrics(p ) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(out_label_list , preds_list ),
            "precision": precision_score(out_label_list , preds_list ),
            "recall": recall_score(out_label_list , preds_list ),
            "f1": fa_score(out_label_list , preds_list ),
        }
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_process_zero():
            with open(output_eval_file , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
                    logger.info(''' %s = %s''' , key , value )
writer.write('''%s = %s\n''' % (key, value) )
        results.update(result )
# Predict
if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
        predictions, label_ids, metrics = trainer.predict(test_dataset )
        preds_list, _ = align_predictions(predictions , label_ids )
        output_test_results_file = os.path.join(training_args.output_dir , '''test_results.txt''' )
        if trainer.is_world_process_zero():
            with open(output_test_results_file , '''w''' ) as writer:
                for key, value in metrics.items():
                    logger.info(''' %s = %s''' , key , value )
writer.write('''%s = %s\n''' % (key, value) )
# Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir , '''test_predictions.txt''' )
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file , '''w''' ) as writer:
                with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f:
                    token_classification_task.write_predictions_to_file(writer , f , preds_list )
return results
def _mp_fn( index ):
    """simple docstring"""
    main()
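# Note (added): this small wrapper matches the entry-point signature expected by
# multi-process launchers (e.g. the xla_spawn.py TPU launcher), which import the
# script and call it with a process index.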
if __name__ == "__main__":
main()
| 351 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common : CommonSchedulerState
    # setable values
    init_noise_sigma : jnp.ndarray
    timesteps : jnp.ndarray
    num_inference_steps : Optional[int] = None
@classmethod
    def create( cls: Dict , common: CommonSchedulerState , init_noise_sigma: jnp.ndarray , timesteps: jnp.ndarray ) -> List[str]:
        """simple docstring"""
        return cls(common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps )
@dataclass
class FlaxDDPMSchedulerOutput( FlaxSchedulerOutput ):
    state : DDPMSchedulerState
class FlaxDDPMScheduler( FlaxSchedulerMixin , ConfigMixin ):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
    dtype : jnp.dtype
@property
    def has_state( self: Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return True
@register_to_config
    def __init__( self: Any , num_train_timesteps: int = 1_000 , beta_start: float = 0.0001 , beta_end: float = 0.02 , beta_schedule: str = "linear" , trained_betas: Optional[jnp.ndarray] = None , variance_type: str = "fixed_small" , clip_sample: bool = True , prediction_type: str = "epsilon" , dtype: jnp.dtype = jnp.floataa , ) -> int:
        """simple docstring"""
        self.dtype = dtype
    def create_state( self: Optional[int] , common: Optional[CommonSchedulerState] = None ) -> DDPMSchedulerState:
        """simple docstring"""
        if common is None:
            common = CommonSchedulerState.create(self )
        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0 , dtype=self.dtype )
        timesteps = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
        return DDPMSchedulerState.create(
            common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps , )
    def scale_model_input( self: Dict , state: DDPMSchedulerState , sample: jnp.ndarray , timestep: Optional[int] = None ) -> jnp.ndarray:
"""simple docstring"""
return sample
    def set_timesteps( self: Dict , state: DDPMSchedulerState , num_inference_steps: int , shape: Tuple = () ) -> DDPMSchedulerState:
        """simple docstring"""
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0 , num_inference_steps ) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps , timesteps=timesteps , )
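    # Worked example (added): with num_train_timesteps=1000 and num_inference_steps=50,
    # step_ratio is 20 and the inference schedule becomes [980, 960, ..., 20, 0].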
    def _get_variance( self: Union[str, Any] , state: DDPMSchedulerState , t: Optional[Any] , predicted_variance: Optional[int]=None , variance_type: str=None ) -> List[Any]:
        """simple docstring"""
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance , a_min=1E-2_0 )
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance , a_min=1E-2_0 ) )
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t] )
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step( self: Any , state: DDPMSchedulerState , model_output: jnp.ndarray , timestep: int , sample: jnp.ndarray , key: Optional[jax.random.KeyArray] = None , return_dict: bool = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        """simple docstring"""
        t = timestep
        if key is None:
            key = jax.random.PRNGKey(0 )
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output , predicted_variance = jnp.split(model_output , sample.shape[1] , axis=1 )
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
                ''' for the FlaxDDPMScheduler.''' )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample , -1 , 1 )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key , num=1 )
            noise = jax.random.normal(split_key , shape=model_output.shape , dtype=self.dtype )
            return (self._get_variance(state , t , predicted_variance=predicted_variance ) ** 0.5) * noise
        variance = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample , state=state )
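    # Summary (added): each call to the method above performs one reverse-diffusion
    # update — reconstruct "x_0" from the model output, mix it with the current
    # sample using the DDPM posterior coefficients (Eq. 7 of
    # https://arxiv.org/pdf/2006.11239.pdf), and, for t > 0, add noise scaled by the
    # configured variance schedule.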
    def add_noise( self: int , state: DDPMSchedulerState , original_samples: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray , ) -> jnp.ndarray:
        """simple docstring"""
        return add_noise_common(state.common , original_samples , noise , timesteps )
    def get_velocity( self: List[str] , state: DDPMSchedulerState , sample: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray , ) -> jnp.ndarray:
        """simple docstring"""
        return get_velocity_common(state.common , sample , noise , timesteps )
def __len__( self: str ) -> List[Any]:
"""simple docstring"""
return self.config.num_train_timesteps
| 93 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->int:
    '''simple docstring'''
    with open(_lowercase , "rb" ) as f:
        im = Image.open(f )
        return im.convert("RGB" )
@dataclass
class DataTrainingArguments:
lowerCamelCase : Optional[str] =field(
default=a__ , metadata={
"""help""": """Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."""
} , )
lowerCamelCase : Optional[str] =field(
default=a__ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
lowerCamelCase : Optional[str] =field(default=a__ , metadata={"""help""": """A folder containing the training data."""} )
lowerCamelCase : Optional[str] =field(default=a__ , metadata={"""help""": """A folder containing the validation data."""} )
lowerCamelCase : Optional[float] =field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
lowerCamelCase : Optional[int] =field(
default=a__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
lowerCamelCase : Optional[int] =field(
default=a__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
    def __post_init__( self ) -> None:
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
"You must specify either a dataset name from the hub or a train and/or validation directory." )
@dataclass
class ModelArguments:
lowerCamelCase : str =field(
default="""google/vit-base-patch16-224-in21k""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
lowerCamelCase : Optional[str] =field(
default=a__ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(a__ )} , )
lowerCamelCase : Optional[str] =field(
default=a__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowerCamelCase : Optional[str] =field(
default=a__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
lowerCamelCase : str =field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
lowerCamelCase : str =field(default=a__ , metadata={"""help""": """Name or path of preprocessor config."""} )
lowerCamelCase : bool =field(
default=a__ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
lowerCamelCase : bool =field(
default=a__ , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def collate_fn( examples ) ->Optional[Any]:
    '''simple docstring'''
    pixel_values = torch.stack([example["pixel_values"] for example in examples] )
    labels = torch.tensor([example["labels"] for example in examples] )
    return {"pixel_values": pixel_values, "labels": labels}
def main( ) ->Tuple:
'''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_image_classification" , _lowercase , _lowercase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir , "**" )
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir , "**" )
        dataset = load_dataset(
            "imagefolder" , data_files=data_files , cache_dir=model_args.cache_dir , task="image-classification" , )
# If we don't have a validation split, split off a percentage of train as validation.
a : List[Any] = None if "validation" in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , _lowercase ) and data_args.train_val_split > 0.0:
a : Tuple = dataset["train"].train_test_split(data_args.train_val_split )
a : List[str] = split["train"]
a : Optional[int] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
a : Tuple = dataset["train"].features["labels"].names
a, a : Optional[int] = {}, {}
for i, label in enumerate(_lowercase ):
a : Union[str, Any] = str(_lowercase )
a : Optional[Any] = label
# Load the accuracy metric from the datasets package
a : Optional[Any] = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_lowercase : Optional[Any] ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
a : str = image_processor.size["shortest_edge"]
else:
a : Any = (image_processor.size["height"], image_processor.size["width"])
a : List[Any] = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
a : List[Any] = Compose(
[
RandomResizedCrop(_lowercase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
a : Optional[Any] = Compose(
[
Resize(_lowercase ),
CenterCrop(_lowercase ),
ToTensor(),
normalize,
] )
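    # Note (added): the two pipelines differ on purpose — training uses random crops
    # and horizontal flips for augmentation, while evaluation uses a deterministic
    # resize + center crop so metrics are stable across runs.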
    def train_transforms(example_batch : Optional[int] ):
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]
        ]
        return example_batch
    def val_transforms(example_batch : List[str] ):
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]]
        return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
# Set the training transforms
dataset["train"].set_transform(_lowercase )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
# Set the validation transforms
dataset["validation"].set_transform(_lowercase )
    # Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
| 105 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self ) -> None:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB )
        mask_token = AddedToken('''<mask>''' , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts(self , __magic_name__ ) -> Dict:
'''simple docstring'''
        input_text = '''this is a test'''
        output_text = '''this is a test'''
return input_text, output_text
    def get_clean_sequence(self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ) -> List[Any]:
        '''simple docstring'''
        input_text, output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
return text, ids
    def test_convert_token_and_id(self ) -> Optional[int]:
'''simple docstring'''
        token = '''<pad>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab(self ) -> Any:
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-4] , '''œ''' )
self.assertEqual(vocab_keys[-2] , '''<mask>''' )
self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' )
        self.assertEqual(len(vocab_keys ) , 81 )
    def test_vocab_size(self ) -> Dict:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
    def test_add_tokens(self ) -> Tuple:
        '''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer )
                self.assertNotEqual(vocab_size , 0 )
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                new_toks = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
                added_toks = tokenizer.add_tokens(new_toks )
                vocab_size_a = tokenizer.vocab_size
                all_size_a = len(tokenizer )
                self.assertNotEqual(vocab_size_a , 0 )
                self.assertEqual(vocab_size , vocab_size_a )
                self.assertEqual(added_toks , len(new_toks ) )
                self.assertEqual(all_size_a , all_size + len(new_toks ) )
                tokens = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=False )
                self.assertGreaterEqual(len(tokens ) , 4 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                new_toks_a = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
                added_toks_a = tokenizer.add_special_tokens(new_toks_a )
                vocab_size_b = tokenizer.vocab_size
                all_size_b = len(tokenizer )
                self.assertNotEqual(vocab_size_b , 0 )
                self.assertEqual(vocab_size , vocab_size_b )
                self.assertEqual(added_toks_a , len(new_toks_a ) )
                self.assertEqual(all_size_b , all_size_a + len(new_toks_a ) )
                tokens = tokenizer.encode(
                    '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=False )
                self.assertGreaterEqual(len(tokens ) , 6 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[0] , tokens[1] )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokens[-4] )
                self.assertEqual(tokens[0] , tokenizer.eos_token_id )
                self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
    def test_subword_regularization_tokenizer(self ) -> Union[str, Any]:
'''simple docstring'''
pass
    def test_pickle_subword_regularization_tokenizer(self ) -> List[str]:
'''simple docstring'''
pass
    def test_full_tokenizer(self ) -> int:
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize('''This is a test''' )
        # fmt: off
        self.assertListEqual(tokens , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] )
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            tokens , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        # fmt: off
        self.assertListEqual(ids , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
        # fmt: on
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
@slow
    def test_tokenizer_integration(self):
        sequences = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
        expected_encoding = {
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
| 279 | 0 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setup(self):
        self.tool = load_tool('text-question-answering')
        self.tool.setup()
        self.remote_tool = load_tool('text-question-answering', remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')
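# Note (explanatory, not from the original file): remote=True exercises the
# hosted-inference code path of the tool, so all four tests assert the same
# answer span to keep local and remote behaviour in sync.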
| 289 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
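# Illustrative usage (a sketch, not from the original file): the backbone mixin
# aligns `out_features` with the stage names built above, e.g.
#   config = MaskFormerSwinConfig(out_features=["stage1", "stage4"])
# exposes the first and last Swin stages to a downstream MaskFormer head.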
| 289 | 1 |
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative when `deriv` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_a = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_a_error = (expected / 100) - layer_a
        # Error delta
        layer_a_delta = layer_a_error * sigmoid_function(layer_a, True)
        # Update weight
        weight += INITIAL_VALUE * layer_a_delta

    return layer_a * 100
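# Illustrative behaviour (hypothetical numbers, not part of the original file):
# each iteration nudges `weight` so that sigmoid_function(INITIAL_VALUE * weight) * 100
# moves toward `expected`, so forward_propagation(32, 450_000) should return a
# value close to 32.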
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
| 195 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher's own arguments plus the wrapped script's arguments."""
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch '
            'helper utility that will spawn up '
            'multiple distributed processes'
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores', type=int, default=1, help='Number of TPU cores to use (1 or 8).')
    # positional
    parser.add_argument(
        'training_script', type=str, help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ) )
    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
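# Typical invocation of this launcher (paths and flags are illustrative):
#   python xla_spawn.py --num_cores 8 path/to/train_script.py --arg1 value1
# The wrapped training script only needs to expose an `_mp_fn(index)` entry point
# for torch_xla's multiprocessing spawner to call on each TPU core.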
| 195 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: List[str] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Union[str, Any] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: List[str] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
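# Note (explanatory, not from the original file): with the lazy module in place,
# importing VisionEncoderDecoderModel resolves through _LazyModule, so the heavy
# torch/TF/flax imports above only run when the class is first accessed.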
| 370 |
'''simple docstring'''
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the largest subsequence
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling value in tail
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
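# Illustrative check (not part of the original doctests):
# longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6,
# e.g. via the subsequence 2, 3, 7, 8, 10, 13.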
| 31 | 0 |
def is_even(number: int) -> bool:
    """Return True when ``number`` is even, by testing its lowest bit."""
    return number & 1 == 0
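# Illustrative calls (hypothetical, not from the original module):
# is_even(4) -> True, is_even(7) -> False; `number & 1` isolates the lowest
# bit, which is 0 exactly for even integers.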
if __name__ == "__main__":
import doctest
doctest.testmod()
| 306 |
from __future__ import annotations

import unittest

from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)


@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
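        # Note (explanatory, not from the original file): only a 3x3 slice of the
        # hidden states is compared against the reference values at atol=1e-4,
        # which keeps this slow integration test cheap while still catching
        # numerical regressions.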
| 306 | 1 |
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch for this alignment, or -1."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


TEXT = "ABAABA"
PATTERN = "AB"

bms = BoyerMooreSearch(TEXT, PATTERN)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
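# Expected output for the example above (explanatory, not from the original
# file): "AB" occurs at indices 0 and 3 of "ABAABA", so this prints [0, 3].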
| 146 |
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Find the unique prime factors of an integer."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoize the number of unique prime factors of `num`."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check whether all elements of a list are equal."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the smallest run of `n` consecutive integers with `n` unique prime factors each."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first qualifying run of length `n`."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
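# Known reference points for this search (Project Euler 47): run(2) starts at 14
# (14 = 2 * 7, 15 = 3 * 5) and run(3) starts at 644 (644, 645, 646 each have
# three distinct prime factors), so solution(2) == 14 and solution(3) == 644.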
| 146 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)
BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
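# Shape sketch (explanatory, not from the original file): with batch=2 and
# seqlen=5 under use_past=True, past_key_values_length is 7, so every past key
# has shape (2 * n_head, head_dim, 7) and every past value (2 * n_head, 7,
# head_dim), matching BLOOM's inverted value layout on axis 2.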
| 65 |
'''simple docstring'''
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes below `max_number` using a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Count hybrid integers p^q * q^p (p < q prime) not exceeding base^degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
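# Why the log transform works (explanatory, not from the original file):
# p^q * q^p <= base^degree is equivalent, after taking log2 of both sides, to
# q * log2(p) + p * log2(q) <= degree * log2(base), which is exactly the
# comparison made by the inner while loop above.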
if __name__ == "__main__":
print(f"""{solution() = }""")
| 93 | 0 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38_848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1_000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__a = {'''input_ids''': [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__a,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
    def test_tokenizer_integration_seperate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2_047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
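# Note on the separate-vocabs test (explanatory, not from the original file):
# passing `text_target=...` routes the sentence through the target-language
# SentencePiece model, which is why the same tokenizer produces different ids
# for source-side and target-side encoding of comparable text.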
| 357 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 33 | 0 |
"""simple docstring"""
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
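# Note (explanatory, not from the original file): the three loops above follow
# the canonical diffusers sampling pattern — scale_model_input(sample, t),
# model(sample, t), then scheduler.step(...) with step().prev_sample feeding the
# next iteration — and the expected sums/means pin the scheduler's numerics per
# device.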
| 289 |
"""simple docstring"""
import warnings
warnings.warn(
"""memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: """
"""`from accelerate import find_executable_batch_size` to avoid this warning.""",
FutureWarning,
)
| 289 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
def __magic_name__ ( self : Dict , **snake_case_ : Optional[Any] ) -> List[str]:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def __magic_name__ ( self : str , **snake_case_ : Dict ) -> str:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **snake_case_ )
def __magic_name__ ( self : List[Any] , **snake_case_ : Optional[int] ) -> Any:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **snake_case_ )
def __magic_name__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __magic_name__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
        A__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
A__ = [Image.fromarray(np.moveaxis(snake_case_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __magic_name__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
A__ = self.get_tokenizer()
A__ = self.get_rust_tokenizer()
A__ = self.get_image_processor()
A__ = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
processor_slow.save_pretrained(self.tmpdirname )
A__ = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case_ )
A__ = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
processor_fast.save_pretrained(self.tmpdirname )
A__ = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , snake_case_ )
self.assertIsInstance(processor_fast.tokenizer , snake_case_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , snake_case_ )
self.assertIsInstance(processor_fast.image_processor , snake_case_ )
def __magic_name__ ( self : int ) -> List[str]:
'''simple docstring'''
A__ = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
A__ = self.get_image_processor(do_normalize=snake_case_ , padding_value=1.0 )
A__ = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case_ )
def __magic_name__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
A__ = self.prepare_image_inputs()
A__ = image_processor(snake_case_ , return_tensors="np" )
A__ = processor(images=snake_case_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __magic_name__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
A__ = "lower newer"
A__ = processor(text=snake_case_ )
A__ = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __magic_name__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
A__ = "lower newer"
A__ = self.prepare_image_inputs()
A__ = processor(text=snake_case_ , images=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(snake_case_ ):
processor()
def __magic_name__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
A__ = self.prepare_image_inputs()
A__ = self.prepare_image_inputs()
A__ = processor(images=snake_case_ , visual_prompt=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "conditional_pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(snake_case_ ):
processor()
def __magic_name__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPSegProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A__ = processor.batch_decode(snake_case_ )
A__ = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
| 366 |
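# A short end-to-end sketch of the processor exercised by the tests above. This
# assumes hub access to the public CIDAS/clipseg-rd64-refined checkpoint.
import numpy as np
from PIL import Image
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
# One call tokenizes the prompts and resizes/normalizes the images.
inputs = processor(text=["a cat", "a dog"], images=[image, image], return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']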
"""simple docstring"""
from typing import Any
class UpperCAmelCase_ :
def __init__( self : Optional[Any] , snake_case_ : Any ) -> List[str]:
'''simple docstring'''
A__ = data
A__ = None
def __repr__( self : Optional[int] ) -> str:
'''simple docstring'''
return F"""Node({self.data})"""
class UpperCAmelCase_ :
def __init__( self : Dict ) -> Any:
'''simple docstring'''
A__ = None
def __iter__( self : List[Any] ) -> Any:
'''simple docstring'''
A__ = self.head
while node:
yield node.data
A__ = node.next
def __len__( self : Any ) -> int:
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self : List[str] ) -> str:
'''simple docstring'''
return "->".join([str(snake_case_ ) for item in self] )
def __getitem__( self : str , snake_case_ : int ) -> Any:
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self : Tuple , snake_case_ : int , snake_case_ : Any ) -> None:
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
A__ = self.head
for _ in range(snake_case_ ):
A__ = current.next
A__ = data
def __magic_name__ ( self : List[Any] , snake_case_ : Any ) -> None:
'''simple docstring'''
self.insert_nth(len(self ) , snake_case_ )
def __magic_name__ ( self : Tuple , snake_case_ : Any ) -> None:
'''simple docstring'''
self.insert_nth(0 , snake_case_ )
def __magic_name__ ( self : Dict , snake_case_ : int , snake_case_ : Any ) -> None:
'''simple docstring'''
if not 0 <= index <= len(self ):
raise IndexError("list index out of range" )
A__ = Node(snake_case_ )
if self.head is None:
A__ = new_node
elif index == 0:
A__ = self.head # link new_node to head
A__ = new_node
else:
A__ = self.head
for _ in range(index - 1 ):
A__ = temp.next
A__ = temp.next
A__ = new_node
def __magic_name__ ( self : Dict ) -> None: # print every node data
'''simple docstring'''
print(self )
def __magic_name__ ( self : Dict ) -> Any:
'''simple docstring'''
return self.delete_nth(0 )
def __magic_name__ ( self : Optional[Any] ) -> Any: # delete from tail
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def __magic_name__ ( self : Any , snake_case_ : int = 0 ) -> Any:
'''simple docstring'''
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("List index out of range." )
A__ = self.head # default first node
if index == 0:
A__ = self.head.next
else:
A__ = self.head
for _ in range(index - 1 ):
A__ = temp.next
A__ = temp.next
A__ = temp.next.next
return delete_node.data
def __magic_name__ ( self : Dict ) -> bool:
'''simple docstring'''
return self.head is None
def __magic_name__ ( self : List[Any] ) -> None:
'''simple docstring'''
A__ = None
A__ = self.head
while current:
# Store the current node's next node.
A__ = current.next
# Make the current node's next point backwards
A__ = prev
# Make the previous node be the current node
A__ = current
# Make the current node the next node (to progress iteration)
A__ = next_node
# Return prev in order to put the head at the end
A__ = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
| 230 | 0 |
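# A tiny non-interactive usage sketch of the LinkedList class above.
lst = LinkedList()
for value in (3, 1, 4):
    lst.insert_tail(value)
lst.insert_head(0)  # 0->3->1->4
lst.reverse()       # 4->1->3->0
assert str(lst) == "4->1->3->0"
assert lst.delete_head() == 4 and len(lst) == 3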
import argparse
from collections import defaultdict


def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()
    main(args.correct_filename, args.fail_filename)
| 308 |
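# The script above expects each line of --correct_filename to contain four
# semicolon-separated fields: file;class;test;corrected line. An illustrative
# invocation (file and script names are hypothetical):
#
#   tests/models/bert/test_modeling_bert.py;BertModelTest;test_inference;expected_slice = torch.tensor([...])
#
#   python overwrite_expected_slice.py --correct_filename fixed_slices.txt --fail_filename failed_tests.txt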
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"""configuration_gpt_neox""": ["""GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXConfig"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_gpt_neox_fast"""] = ["""GPTNeoXTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_gpt_neox"""] = [
"""GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXForCausalLM""",
"""GPTNeoXForQuestionAnswering""",
"""GPTNeoXForSequenceClassification""",
"""GPTNeoXForTokenClassification""",
"""GPTNeoXLayer""",
"""GPTNeoXModel""",
"""GPTNeoXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 31 | 0 |
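# A self-contained sketch of the lazy-import idea behind `_LazyModule`, written with
# PEP 562's module-level __getattr__. Names here are illustrative, not the actual
# transformers implementation.
import importlib

_import_structure = {"math": ["sqrt"]}


def __getattr__(name):
    for module_name, exports in _import_structure.items():
        if name in exports:
            # The submodule is only imported the first time the attribute is accessed.
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")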
"""simple docstring"""
import random
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
@staticmethod
def __SCREAMING_SNAKE_CASE ( __a : str ) -> tuple[list[int], list[int]]:
_UpperCamelCase : List[Any] = [ord(__a ) for i in text]
_UpperCamelCase : Optional[Any] = []
_UpperCamelCase : Any = []
for i in plain:
_UpperCamelCase : List[str] = random.randint(1 , 300 )
_UpperCamelCase : List[str] = (i + k) * k
cipher.append(__a )
key.append(__a )
return cipher, key
@staticmethod
def __SCREAMING_SNAKE_CASE ( __a : list[int] , __a : list[int] ) -> str:
_UpperCamelCase : int = []
for i in range(len(__a ) ):
_UpperCamelCase : Tuple = int((cipher[i] - (key[i]) ** 2) / key[i] )
plain.append(chr(__a ) )
return "".join(__a )
if __name__ == "__main__":
lowerCamelCase__ , lowerCamelCase__ = Onepad().encrypt("Hello")
print(c, k)
print(Onepad().decrypt(c, k))
| 310 |
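# A quick round-trip property check for the one-time-pad class above: for any text,
# decrypt(encrypt(text)) recovers the original string, since ((i + k) * k - k**2) / k == i.
cipher_text, pad = Onepad.encrypt("round trip")
assert Onepad.decrypt(cipher_text, pad) == "round trip"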
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}


class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=25_0880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=1_0240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-0_5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 310 | 1 |
import argparse
from collections import defaultdict
import yaml
__UpperCamelCase : List[Any] = "docs/source/en/_toctree.yml"
def _a ( SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
UpperCamelCase__ : str = defaultdict(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = []
UpperCamelCase__ : Dict = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'''local''': doc['''local'''], '''title''': doc['''title''']} )
else:
new_doc_list.append(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = new_doc_list
UpperCamelCase__ : Dict = [key for key, value in counts.items() if value > 1]
UpperCamelCase__ : int = []
for duplicate_key in duplicates:
UpperCamelCase__ : Union[str, Any] = list({doc['''title'''] for doc in doc_list if doc['''local'''] == duplicate_key} )
if len(SCREAMING_SNAKE_CASE ) > 1:
raise ValueError(
F"{duplicate_key} is present several times in the documentation table of content at "
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if '''local''' not in counts or counts[doc['''local''']] == 1] )
UpperCamelCase__ : str = sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(SCREAMING_SNAKE_CASE ) > 1:
raise ValueError('''{doc_list} has two \'overview\' docs which is not allowed.''' )
overview_doc.extend(SCREAMING_SNAKE_CASE )
# Sort
return overview_doc
def _a ( SCREAMING_SNAKE_CASE : str=False ):
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as f:
UpperCamelCase__ : Dict = yaml.safe_load(f.read() )
# Get to the API doc
UpperCamelCase__ : Optional[int] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCamelCase__ : Optional[Any] = content[api_idx]['''sections''']
# Then to the model doc
UpperCamelCase__ : Dict = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
UpperCamelCase__ : Optional[Any] = api_doc[scheduler_idx]['''sections''']
UpperCamelCase__ : Optional[int] = clean_doc_toc(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = False
if new_scheduler_doc != scheduler_doc:
UpperCamelCase__ : int = True
if overwrite:
UpperCamelCase__ : Tuple = new_scheduler_doc
if diff:
if overwrite:
UpperCamelCase__ : str = api_doc
with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(SCREAMING_SNAKE_CASE , allow_unicode=SCREAMING_SNAKE_CASE ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
def _a ( SCREAMING_SNAKE_CASE : int=False ):
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as f:
UpperCamelCase__ : Dict = yaml.safe_load(f.read() )
# Get to the API doc
UpperCamelCase__ : str = 0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCamelCase__ : Union[str, Any] = content[api_idx]['''sections''']
# Then to the model doc
UpperCamelCase__ : Tuple = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
UpperCamelCase__ : Optional[int] = False
UpperCamelCase__ : List[Any] = api_doc[pipeline_idx]['''sections''']
UpperCamelCase__ : Dict = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
UpperCamelCase__ : List[str] = pipeline_doc['''section''']
UpperCamelCase__ : Any = clean_doc_toc(SCREAMING_SNAKE_CASE )
if overwrite:
UpperCamelCase__ : Union[str, Any] = new_sub_pipeline_doc
new_pipeline_docs.append(SCREAMING_SNAKE_CASE )
# sort overall pipeline doc
UpperCamelCase__ : List[Any] = clean_doc_toc(SCREAMING_SNAKE_CASE )
if new_pipeline_docs != pipeline_docs:
UpperCamelCase__ : str = True
if overwrite:
UpperCamelCase__ : Dict = new_pipeline_docs
if diff:
if overwrite:
UpperCamelCase__ : Dict = api_doc
with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(SCREAMING_SNAKE_CASE , allow_unicode=SCREAMING_SNAKE_CASE ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
__UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
__UpperCamelCase : str = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 146 |
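# An illustrative input/output pair for `clean_doc_toc` above: the "overview" entry
# is moved to the front and the remaining entries are sorted by title.
#
#   before: [{"local": "b", "title": "Zeta"}, {"local": "a", "title": "Overview"},
#            {"local": "c", "title": "Alpha"}]
#   after:  [{"local": "a", "title": "Overview"}, {"local": "c", "title": "Alpha"},
#            {"local": "b", "title": "Zeta"}]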
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def _a ( SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
if is_torch_version('''<''' , '''2.0.0''' ) or not hasattr(SCREAMING_SNAKE_CASE , '''_dynamo''' ):
return False
return isinstance(SCREAMING_SNAKE_CASE , torch._dynamo.eval_frame.OptimizedModule )
def _a ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : bool = True ):
"""simple docstring"""
UpperCamelCase__ : Any = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
UpperCamelCase__ : Optional[int] = is_compiled_module(SCREAMING_SNAKE_CASE )
if is_compiled:
UpperCamelCase__ : Optional[int] = model
UpperCamelCase__ : Optional[Any] = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ : Optional[int] = model.module
if not keep_fpaa_wrapper:
UpperCamelCase__ : Optional[Any] = getattr(SCREAMING_SNAKE_CASE , '''forward''' )
UpperCamelCase__ : Optional[Any] = model.__dict__.pop('''_original_forward''' , SCREAMING_SNAKE_CASE )
if original_forward is not None:
while hasattr(SCREAMING_SNAKE_CASE , '''__wrapped__''' ):
UpperCamelCase__ : Optional[int] = forward.__wrapped__
if forward == original_forward:
break
UpperCamelCase__ : Optional[Any] = forward
if getattr(SCREAMING_SNAKE_CASE , '''_converted_to_transformer_engine''' , SCREAMING_SNAKE_CASE ):
convert_model(SCREAMING_SNAKE_CASE , to_transformer_engine=SCREAMING_SNAKE_CASE )
if is_compiled:
UpperCamelCase__ : Tuple = model
UpperCamelCase__ : Tuple = compiled_model
return model
def _a ( ):
"""simple docstring"""
PartialState().wait_for_everyone()
def _a ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
if PartialState().distributed_type == DistributedType.TPU:
xm.save(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif PartialState().local_process_index == 0:
torch.save(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@contextmanager
def patch_environment(**kwargs):
    """Adds each keyword argument to `os.environ` (upper-cased) for the block and removes it on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def _a ( SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
if not hasattr(SCREAMING_SNAKE_CASE , '''__qualname__''' ) and not hasattr(SCREAMING_SNAKE_CASE , '''__name__''' ):
UpperCamelCase__ : str = getattr(SCREAMING_SNAKE_CASE , '''__class__''' , SCREAMING_SNAKE_CASE )
if hasattr(SCREAMING_SNAKE_CASE , '''__qualname__''' ):
return obj.__qualname__
if hasattr(SCREAMING_SNAKE_CASE , '''__name__''' ):
return obj.__name__
return str(SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
for key, value in source.items():
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ : Optional[Any] = destination.setdefault(SCREAMING_SNAKE_CASE , {} )
merge_dicts(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
UpperCamelCase__ : List[Any] = value
return destination
def _a ( SCREAMING_SNAKE_CASE : int = None ):
"""simple docstring"""
if port is None:
UpperCamelCase__ : Union[str, Any] = 29500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('''localhost''', port) ) == 0
| 146 | 1 |
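# A minimal usage sketch of the `patch_environment` context manager above: variables
# are exported upper-cased for the duration of the block and removed afterwards.
import os

with patch_environment(master_addr="127.0.0.1", master_port="29501"):
    assert os.environ["MASTER_ADDR"] == "127.0.0.1"
assert "MASTER_ADDR" not in os.environ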
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_yolos'] = ['YolosFeatureExtractor']
    _import_structure['image_processing_yolos'] = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_yolos'] = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 365 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_efficientnet'] = ['EfficientNetImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_efficientnet'] = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 302 | 0 |
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
lowerCAmelCase__ = {'''UserAgent''': UserAgent().random}
def _A ( A__ ):
"""simple docstring"""
__lowercase = script.contents[0]
__lowercase = json.loads(data[data.find('''{"config"''' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class lowercase_ :
"""simple docstring"""
def __init__( self : List[Any] ,lowercase__ : Any ):
__lowercase = F"https://www.instagram.com/{username}/"
__lowercase = self.get_json()
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = requests.get(self.url ,headers=lowercase__ ).text
__lowercase = BeautifulSoup(lowercase__ ,'''html.parser''' ).find_all('''script''' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : int ):
return F"{self.__class__.__name__}(\'{self.username}\')"
def __str__( self : Dict ):
return F"{self.fullname} ({self.username}) is {self.biography}"
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
return self.user_data["username"]
@property
def SCREAMING_SNAKE_CASE ( self : Any ):
return self.user_data["full_name"]
@property
def SCREAMING_SNAKE_CASE ( self : int ):
return self.user_data["biography"]
@property
def SCREAMING_SNAKE_CASE ( self : int ):
return self.user_data["business_email"]
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return self.user_data["external_url"]
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ):
return self.user_data["edge_followed_by"]["count"]
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
return self.user_data["edge_follow"]["count"]
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ):
return self.user_data["profile_pic_url_hd"]
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
return self.user_data["is_verified"]
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ):
return self.user_data["is_private"]
def _A ( A__ = "github" ):
"""simple docstring"""
import os
if os.environ.get('''CI''' ):
return # test failing on GitHub Actions
__lowercase = InstagramUser(__snake_case )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , __snake_case )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = InstagramUser('''github''')
print(instagram_user)
print(f'{instagram_user.number_of_posts = }')
print(f'{instagram_user.number_of_followers = }')
print(f'{instagram_user.number_of_followings = }')
print(f'{instagram_user.email = }')
print(f'{instagram_user.website = }')
print(f'{instagram_user.profile_picture_url = }')
print(f'{instagram_user.is_verified = }')
print(f'{instagram_user.is_private = }')
| 104 |
"""simple docstring"""
def solution(n: int = 1_0_0) -> int:
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
| 33 | 0 |
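# The same result in O(1) using the closed forms sum(i) = n(n+1)/2 and
# sum(i^2) = n(n+1)(2n+1)/6.
def solution_closed_form(n: int = 100) -> int:
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares


assert solution_closed_form(10) == 2640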
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 371 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
lowercase : Optional[Any] = '''src/diffusers'''
# Matches is_xxx_available()
lowercase : Any = re.compile(r'''is\_([a-z_]*)_available\(\)''')
# Matches from xxx import bla
lowercase : Tuple = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
lowercase : Optional[Any] = '''
{0} = None
'''
lowercase : Union[str, Any] = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
'''
lowercase : List[str] = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
def lowerCAmelCase__ ( _a : Union[str, Any] ):
snake_case_ : str = _re_backend.findall(_a )
if len(_a ) == 0:
return None
return "_and_".join(_a )
def lowerCAmelCase__ ( ):
with open(os.path.join(_a , "__init__.py" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
snake_case_ : str = f.readlines()
# Get to the point we do the actual imports for type checking
snake_case_ : List[Any] = 0
snake_case_ : Optional[int] = {}
# Go through the end of the file
while line_index < len(_a ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
snake_case_ : Optional[int] = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("else:" ):
line_index += 1
line_index += 1
snake_case_ : Dict = []
# Until we unindent, add backend objects to the list
while line_index < len(_a ) and len(lines[line_index] ) > 1:
snake_case_ : Any = lines[line_index]
snake_case_ : Tuple = _re_single_line_import.search(_a )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(_a ) > 0:
snake_case_ : List[str] = objects
else:
line_index += 1
return backend_specific_objects
def lowerCAmelCase__ ( _a : Any , _a : Union[str, Any] ):
if name.isupper():
return DUMMY_CONSTANT.format(_a )
elif name.islower():
return DUMMY_FUNCTION.format(_a , _a )
else:
return DUMMY_CLASS.format(_a , _a )
def lowerCAmelCase__ ( _a : Optional[Any]=None ):
if backend_specific_objects is None:
snake_case_ : Dict = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
snake_case_ : Optional[Any] = {}
for backend, objects in backend_specific_objects.items():
snake_case_ : List[Any] = "[" + ", ".join(F'''"{b}"''' for b in backend.split("_and_" ) ) + "]"
snake_case_ : Union[str, Any] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(_a , _a ) for o in objects] )
snake_case_ : List[Any] = dummy_file
return dummy_files
def lowerCAmelCase__ ( _a : Tuple=False ):
snake_case_ : Any = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
snake_case_ : str = {"torch": "pt"}
# Locate actual dummy modules and read their content.
snake_case_ : Tuple = os.path.join(_a , "utils" )
snake_case_ : str = {
backend: os.path.join(_a , F'''dummy_{short_names.get(_a , _a )}_objects.py''' )
for backend in dummy_files.keys()
}
snake_case_ : Union[str, Any] = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(_a ):
with open(_a , "r" , encoding="utf-8" , newline="\n" ) as f:
snake_case_ : Optional[int] = f.read()
else:
snake_case_ : str = ""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'''Updating diffusers.utils.dummy_{short_names.get(_a , _a )}_objects.py as the main '''
"__init__ has new objects." )
with open(dummy_file_paths[backend] , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"The main __init__ has objects that are not present in "
F'''diffusers.utils.dummy_{short_names.get(_a , _a )}_objects.py. Run `make fix-copies` '''
"to fix this." )
if __name__ == "__main__":
lowercase : List[Any] = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
lowercase : str = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 36 | 0 |
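# For reference, this is the kind of placeholder the templates above render for a
# torch-backed class named "UNet2DModel" (class name illustrative):
#
#   class UNet2DModel(metaclass=DummyObject):
#       _backends = ["torch"]
#
#       def __init__(self, *args, **kwargs):
#           requires_backends(self, ["torch"])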
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
    )
    parser.add_argument(
        """--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
    )
    parser.add_argument(
        """--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
    )
    parser.add_argument("""--vocab_size""", default=3_0_5_2_2, type=int)
    args = parser.parse_args()

    logger.info(F'''Loading data from {args.data_file}''')
    with open(args.data_file, """rb""") as fp:
        data = pickle.load(fp)

    logger.info("""Counting occurrences for MLM.""")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(F'''Dump to {args.token_counts_dump}''')
    with open(args.token_counts_dump, """wb""") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 265 |
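# A hedged sketch of how such counts are typically turned into MLM masking
# probabilities downstream (the 0.7 smoothing exponent is an assumption borrowed
# from the distillation scripts, not part of this file):
import numpy as np

counts = np.array([10, 1000, 0, 5], dtype=np.float64)
probs = np.maximum(counts, 1) ** -0.7  # rarer tokens get masked more often
probs /= probs.sum()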
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('''--make-reports''')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 230 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(' ' + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(F'''{round(-1 * my_fir_sum ):.1f}''')

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(F'''{round(-1 * my_sec_sum ):.1f}''')

    # print the difference between them
    print(F'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''')


def analyze_text(text: str) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 274 |
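# A tiny worked example of the formula used above, H = -sum(p * log2(p)):
# a fair coin carries exactly 1 bit of entropy.
import math

probabilities = [0.5, 0.5]
entropy = -sum(p * math.log2(p) for p in probabilities)
assert abs(entropy - 1.0) < 1e-12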
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : int = ['a', 'b', 'c']
# Defaults to last layer if both are None
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = get_aligned_output_features_output_indices(snake_case_ , snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , ['c'] )
self.assertEqual(snake_case_ , [2] )
# Out indices set to match out features
UpperCAmelCase_ , UpperCAmelCase_ : int = get_aligned_output_features_output_indices(['a', 'c'] , snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , ['a', 'c'] )
self.assertEqual(snake_case_ , [0, 2] )
# Out features set to match out indices
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = get_aligned_output_features_output_indices(snake_case_ , [0, 2] , snake_case_ )
self.assertEqual(snake_case_ , ['a', 'c'] )
self.assertEqual(snake_case_ , [0, 2] )
# Out features selected from negative indices
UpperCAmelCase_ , UpperCAmelCase_ : int = get_aligned_output_features_output_indices(snake_case_ , [-3, -1] , snake_case_ )
self.assertEqual(snake_case_ , ['a', 'c'] )
self.assertEqual(snake_case_ , [-3, -1] )
def _UpperCamelCase ( self ):
'''simple docstring'''
with self.assertRaises(snake_case_ ):
verify_out_features_out_indices(['a', 'b'] , (0, 1) , snake_case_ )
# Out features must be a list
with self.assertRaises(snake_case_ ):
verify_out_features_out_indices(('a', 'b') , (0, 1) , ['a', 'b'] )
# Out features must be a subset of stage names
with self.assertRaises(snake_case_ ):
verify_out_features_out_indices(['a', 'b'] , (0, 1) , ['a'] )
# Out indices must be a list or tuple
with self.assertRaises(snake_case_ ):
verify_out_features_out_indices(snake_case_ , 0 , ['a', 'b'] )
# Out indices must be a subset of stage names
with self.assertRaises(snake_case_ ):
verify_out_features_out_indices(snake_case_ , (0, 1) , ['a'] )
# Out features and out indices must be the same length
with self.assertRaises(snake_case_ ):
verify_out_features_out_indices(['a', 'b'] , (0,) , ['a', 'b', 'c'] )
# Out features should match out indices
with self.assertRaises(snake_case_ ):
verify_out_features_out_indices(['a', 'b'] , (0, 2) , ['a', 'b', 'c'] )
# Out features and out indices should be in order
with self.assertRaises(snake_case_ ):
verify_out_features_out_indices(['b', 'a'] , (0, 1) , ['a', 'b'] )
# Check passes with valid inputs
verify_out_features_out_indices(['a', 'b', 'd'] , (0, 1, -1) , ['a', 'b', 'c', 'd'] )
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = BackboneMixin()
UpperCAmelCase_ : Any = ['a', 'b', 'c']
UpperCAmelCase_ : str = ['a', 'c']
UpperCAmelCase_ : str = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ['a', 'c'] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
UpperCAmelCase_ : str = ['a', 'b']
self.assertEqual(backbone.out_features , ['a', 'b'] )
self.assertEqual(backbone.out_indices , [0, 1] )
UpperCAmelCase_ : Optional[int] = [-3, -1]
self.assertEqual(backbone.out_features , ['a', 'c'] )
self.assertEqual(backbone.out_indices , [-3, -1] )
| 274 | 1 |
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """
    Calculate inductive reactance, frequency or inductance from two of the three
    given electrical properties, with the unknown one passed as 0.
    """
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if inductance < 0:
        raise ValueError('Inductance cannot be negative')
    if frequency < 0:
        raise ValueError('Frequency cannot be negative')
    if reactance < 0:
        raise ValueError('Inductive reactance cannot be negative')
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError('Exactly one argument must be 0')
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310 |
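# A worked call for the function above (X_L = 2*pi*f*L): 35 mH at 1 kHz gives
# roughly 219.91 ohms.
print(ind_reactance(35e-3, 1e3, 0))  # {'reactance': 219.9114857512855}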
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=True , _lowercase="pt" ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = {'add_prefix_space': True} if isinstance(_lowercase , _lowercase ) and not line.startswith(' ' ) else {}
__UpperCamelCase = padding_side
return tokenizer(
[line] , max_length=_lowercase , padding='max_length' if pad_to_max_length else None , truncation=_lowercase , return_tensors=_lowercase , add_special_tokens=_lowercase , **_lowercase , )
def _A ( _lowercase , _lowercase , _lowercase=None , ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase = input_ids.ne(_lowercase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __lowerCamelCase (_a ):
def __init__( self: List[str],A_: str,A_: List[str],A_: List[str],A_: List[str],A_: Tuple="train",A_: Any=None,A_: List[str]=None,A_: List[Any]=None,A_: int="",):
'''simple docstring'''
super().__init__()
__UpperCamelCase = Path(A_ ).joinpath(type_path + '.source' )
__UpperCamelCase = Path(A_ ).joinpath(type_path + '.target' )
__UpperCamelCase = self.get_char_lens(self.src_file )
__UpperCamelCase = max_source_length
__UpperCamelCase = max_target_length
assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}'''
__UpperCamelCase = tokenizer
__UpperCamelCase = prefix
if n_obs is not None:
__UpperCamelCase = self.src_lens[:n_obs]
__UpperCamelCase = src_lang
__UpperCamelCase = tgt_lang
def __len__( self: Optional[Any] ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self: int,A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = index + 1 # linecache starts at 1
__UpperCamelCase = self.prefix + linecache.getline(str(self.src_file ),A_ ).rstrip('\n' )
__UpperCamelCase = linecache.getline(str(self.tgt_file ),A_ ).rstrip('\n' )
assert source_line, F'''empty source line for index {index}'''
assert tgt_line, F'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer,A_ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
__UpperCamelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer,A_ ) else self.tokenizer
)
__UpperCamelCase = self.tokenizer.generator if isinstance(self.tokenizer,A_ ) else self.tokenizer
__UpperCamelCase = encode_line(A_,A_,self.max_source_length,'right' )
__UpperCamelCase = encode_line(A_,A_,self.max_target_length,'right' )
__UpperCamelCase = source_inputs['input_ids'].squeeze()
__UpperCamelCase = target_inputs['input_ids'].squeeze()
__UpperCamelCase = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def snake_case_ ( A_: List[Any] ):
'''simple docstring'''
return [len(A_ ) for x in Path(A_ ).open().readlines()]
def snake_case_ ( self: Union[str, Any],A_: Any ):
'''simple docstring'''
__UpperCamelCase = torch.stack([x['input_ids'] for x in batch] )
__UpperCamelCase = torch.stack([x['attention_mask'] for x in batch] )
__UpperCamelCase = torch.stack([x['decoder_input_ids'] for x in batch] )
__UpperCamelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer,A_ )
else self.tokenizer.pad_token_id
)
__UpperCamelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer,A_ )
else self.tokenizer.pad_token_id
)
__UpperCamelCase = trim_batch(A_,A_ )
__UpperCamelCase, __UpperCamelCase = trim_batch(A_,A_,attention_mask=A_ )
__UpperCamelCase = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]) -> List:
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f, x) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
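# Quick sanity checks for the metrics above (illustrative):
#
#     f1_score("The cat sat", "the cat sat down")
#     # -> ~0.857 (3 shared tokens, precision 1.0, recall 0.75)
#     exact_match_score("The Cat!", "the cat")
#     # -> True; normalize_answer strips case, punctuation and articles
#     calculate_exact_match(["a b"], ["a b"])
#     # -> {"em": 1.0}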
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 310 | 1 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want one 50 times smaller than this, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = "facebook/wmt19-en-de"

tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-de
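# Sanity check after upload (illustrative sketch; assumes the files were pushed to the
# Hub under "stas/tiny-wmt19-en-de" as noted above):
#
#     from transformers import FSMTForConditionalGeneration, FSMTTokenizer
#
#     tok = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-de")
#     model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-de")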
| 367 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 88 | 0 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training dataset. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    args = parse_args()
    main(args)
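# Reading the shards back (illustrative sketch, not part of the original script). The
# feature spec mirrors what `get_serialized_examples` writes above; the glob pattern
# assumes the default `--output_dir tf-tpu --split train`:
#
#     feature_spec = {
#         "input_ids": tf.io.VarLenFeature(tf.int64),
#         "attention_mask": tf.io.VarLenFeature(tf.int64),
#     }
#
#     def decode_fn(record):
#         return tf.io.parse_single_example(record, feature_spec)
#
#     ds = tf.data.TFRecordDataset(tf.io.gfile.glob("tf-tpu/train/*.tfrecord")).map(decode_fn)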
| 281 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}


class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
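# Minimal instantiation sketch (illustrative): a series predicted 24 steps ahead with
# two static categorical features of cardinalities 5 and 10; `embedding_dimension` is
# then derived as [3, 5] by the rule above and folded into `feature_size`.
#
#     config = AutoformerConfig(
#         prediction_length=24,
#         context_length=48,
#         num_static_categorical_features=2,
#         cardinality=[5, 10],
#     )
#     print(config.feature_size)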
| 302 | 0 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
_CITATION = '''\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100,000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
'''
_DESCRIPTION = '''
This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
'''
_KWARGS_DESCRIPTION = '''
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the SQuAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]
>>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]
>>> squad_metric = datasets.load_metric("squad")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Squad(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 350 |
'''simple docstring'''
def odd_even_sort(input_list: list) -> list:
    """Sort a list in place using odd-even transposition (brick) sort."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
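# Example (illustrative): odd_even_sort([5, 4, 3, 2, 1]) returns [1, 2, 3, 4, 5].
# Like bubble sort it is O(n^2) in the worst case, but the compare-exchanges within
# one even (or odd) phase are independent, which makes the algorithm easy to parallelize.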
if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
| 156 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(self, feature_size=80, sampling_rate=16000, num_mel_bins=80, padding_value=0.0, do_ceptral_normalize=True, normalize_means=True, normalize_vars=True, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        """Get mel-filter bank features using TorchAudio's Kaldi-compliant fbank."""
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(x: np.ndarray, input_length: int, normalize_means: bool = True, normalize_vars: bool = True, padding_value: float = 0.0) -> np.ndarray:
        # utterance-level cepstral mean and variance normalization
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, return_attention_mask: Optional[bool] = None, **kwargs) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
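# Usage sketch (illustrative; requires torchaudio): featurize one second of 16 kHz audio.
#
#     audio = np.zeros(16000, dtype=np.float32)
#     extractor = Speech2TextFeatureExtractor()
#     inputs = extractor(audio, sampling_rate=16000, padding=True, return_tensors="np")
#     print(inputs["input_features"].shape)  # (batch, frames, num_mel_bins)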
| 109 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """Perform a runtime check of a dependency version, using the same syntax as pip."""
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure"""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
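# Usage sketch (illustrative):
#
#     require_version("numpy")                  # only checks that the package is installed
#     require_version("tokenizers==0.9.4")      # exact pin
#     require_version("numpy<1.25,>=1.17")      # multiple comma-separated constraints
#     require_version_core("datasets>=1.0")     # same check, with the core install hint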
| 36 | 0 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
| 199 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    r"""
    Constructs an InstructBLIP processor which wraps a BLIP image processor, an LLM tokenizer and a Q-Former
    tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    # overwrite: the Q-Former tokenizer is saved in a dedicated subfolder
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    # overwrite: the Q-Former tokenizer is loaded from its dedicated subfolder
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
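# Usage sketch (illustrative; "Salesforce/instructblip-flan-t5-xl" is one published checkpoint):
#
#     from PIL import Image
#
#     processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#     image = Image.new("RGB", (224, 224))
#     inputs = processor(images=image, text="What is in the picture?", return_tensors="pt")
#     # keys include input_ids, qformer_input_ids and pixel_values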
| 199 | 1 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
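# Usage sketch (illustrative): this packaged builder is what `datasets` dispatches to
# for pickled DataFrames, e.g.
#
#     from datasets import load_dataset
#
#     ds = load_dataset("pandas", data_files={"train": "data/train.pkl"}, split="train")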
| 274 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/m2m100_418M": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file, spm_file, src_lang=None, tgt_lang=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", pad_token="<pad>", unk_token="<unk>", language_codes="m2m100", sp_model_kwargs: Optional[Dict[str, Any]] = None, num_madeup_words=8, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the decoder."""
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens) -> str:
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting: prefix=[src_lang_code] and suffix=[eos]."""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target lang setting: prefix=[tgt_lang_code] and suffix=[eos]."""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
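# Usage sketch (illustrative; assumes the "facebook/m2m100_418M" checkpoint):
#
#     tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#     inputs = tokenizer("Hello world", return_tensors="pt")
#     # per set_src_lang_special_tokens above, input_ids are wrapped as [__en__] ... [</s>]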
| 274 | 1 |
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}
class __lowerCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : Tuple=None , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : List[str]):
super().__init__(*__a , **__a)
if config is None:
assert isinstance(self.model , __a), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F' {self.model.__class__}'
)
_A : Dict = self.model.config
else:
_A : Any = config
_A : Optional[int] = data_args
_A : Optional[Any] = self.config.tgt_vocab_size if isinstance(self.config , __a) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'
' padding..')
if self.args.label_smoothing == 0:
_A : Optional[Any] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_A : str = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler
    def _get_train_sampler(self):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset)
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset)
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset)
)
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits
    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        # if the PAD token is not defined, at least the EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
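
# --- Usage sketch (added): wiring the trainer above into a training run.
# A minimal sketch only; `training_args`, `data_args` and the dataset objects
# are assumptions (they come from the surrounding example scripts, not this file).
#
# from transformers import FSMTForConditionalGeneration
#
# model = FSMTForConditionalGeneration.from_pretrained("facebook/wmt19-en-de")
# trainer = Seq2SeqTrainer(
#     model=model,
#     args=training_args,       # provides label_smoothing, adafactor, lr_scheduler, ...
#     config=model.config,
#     data_args=data_args,      # provides val_max_target_length / eval_beams for generation
#     train_dataset=train_dataset,
#     eval_dataset=eval_dataset,
# )
# trainer.train()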
| 362 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
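
# --- Usage sketch (added): what the lazy module above buys you. A sketch, not
# part of the original file; the import-cost claim is qualitative.
#
# import transformers.models.electra as electra  # cheap: no heavy backend imported yet
# model_cls = electra.ElectraModel               # torch + modeling_electra load only now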
| 227 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
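
# --- Usage sketch (added): exercising the ONNX config above. A sketch under the
# assumption that the `deepmind/language-perceiver` tokenizer is downloadable.
#
# from transformers import PerceiverTokenizer
#
# config = PerceiverConfig()
# onnx_config = PerceiverOnnxConfig(config)
# tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
# dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8)
# print(dummy.keys())  # expect: dict_keys(['inputs', 'attention_mask'])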
| 125 |
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all digits before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )
    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 88 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024
        return tokenizer

    @require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)

    @require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)

    @require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weater?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"

                additional_special_tokens = tokenizer.additional_special_tokens

                # We can add a new special token for Canine as follows:
                new_additional_special_token = chr(0xE007)
                additional_special_tokens.append(new_additional_special_token)
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, ids = self.get_clean_sequence(tokenizer)

                # a special token for Canine can be defined as follows:
                SPECIAL_TOKEN = 0xE005
                special_token = chr(SPECIAL_TOKEN)

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)

                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, input_encoded + special_token_id)

                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_tokenize_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)

                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]})

                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)

                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)

    @require_tokenizers
    def test_added_token_serializable(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token = chr(NEW_TOKEN)

                new_token = AddedToken(new_token, lstrip=True)
                tokenizer.add_special_tokens({"additional_special_tokens": [new_token]})

                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token_1 = chr(NEW_TOKEN)

                special_tokens_map["additional_special_tokens"] = [new_token_1]
                tokenizer_config["additional_special_tokens"] = [new_token_1]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_1],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])
                    ),
                )

                NEW_TOKEN = 0xE007
                new_token_2 = chr(NEW_TOKEN)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_2, lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0
                )

                self.assertIn(new_token_2, tokenizer.additional_special_tokens)
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2]))
                )

    @require_tokenizers
    def test_encode_decode_with_spaces(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input = "hello world"
                if self.space_between_special_tokens:
                    output = "[CLS] hello world [SEP]"
                else:
                    output = input
                encoded = tokenizer.encode(input, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])

    def test_special_token_id_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_to_test_setters = "a"
                token_id_to_test_setters = ord(token_to_test_setters)

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, "additional_special_tokens_ids", [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [additional_special_token])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [additional_special_token_id])

    # tokenizer has a fixed vocab_size (namely all possible unicode code points)
    def test_add_tokens_tokenizer(self):
        pass

    # CanineTokenizer does not support do_lower_case = True, as each character has its own Unicode code point
    def test_added_tokens_do_lower_case(self):
        pass

    # CanineModel does not support the get_input_embeddings nor the get_vocab method
    def test_np_encode_plus_sent_to_model(self):
        pass

    # CanineModel does not support the get_input_embeddings nor the get_vocab method
    def test_torch_encode_plus_sent_to_model(self):
        pass

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
| 370 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/m2m100_418M''': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
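
# --- Usage sketch (added): tokenizing a translation pair with the tokenizer
# above. Network access to the `facebook/m2m100_418M` checkpoint is assumed.
#
# tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
# model_inputs = tokenizer("Hello world", text_target="Bonjour le monde", return_tensors="pt")
# # input_ids start with the __en__ language token and end with </s>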
| 156 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 156 |
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
__lowerCAmelCase : Optional[Any] = {"UserAgent": UserAgent().random}
def extract_user_profile(script) -> dict:
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def UpperCAmelCase_ ( __lowerCAmelCase = "github" ) -> None:
import os
if os.environ.get('''CI''' ):
return # test failing on GitHub Actions
__lowercase : Dict = InstagramUser(__lowerCAmelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , __lowerCAmelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : List[str] = InstagramUser("github")
print(instagram_user)
print(F'{instagram_user.number_of_posts = }')
print(F'{instagram_user.number_of_followers = }')
print(F'{instagram_user.number_of_followings = }')
print(F'{instagram_user.email = }')
print(F'{instagram_user.website = }')
print(F'{instagram_user.profile_picture_url = }')
print(F'{instagram_user.is_verified = }')
print(F'{instagram_user.is_private = }')
| 156 | 1 |
"""simple docstring"""
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative when `deriv=True`
    (in which case `value` is expected to be an already-activated output)."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
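

# --- Added note: for y = sigmoid(x), dy/dx = y * (1 - y), which is why the
# `deriv` branch above takes the already-activated value y rather than x.
# A quick sanity check (a sketch; the point 0.3 and finite-difference step are
# arbitrary choices, not from the original file):
if __name__ == "__main__":
    _h = 1e-6
    _y = sigmoid_function(0.3)
    _numeric = (sigmoid_function(0.3 + _h) - sigmoid_function(0.3 - _h)) / (2 * _h)
    assert abs(sigmoid_function(_y, deriv=True) - _numeric) < 1e-6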
# Initial Value
SCREAMING_SNAKE_CASE = 0.02
def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after the forward propagation training."""

    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE = int(input("Expected value: "))
SCREAMING_SNAKE_CASE = int(input("Number of propagations: "))
print(forward_propagation(expected, number_propagations))
| 366 |
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"
class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def _SCREAMING_SNAKE_CASE ( ) -> None:
A__ = LinkedList()
assert linked_list.is_empty() is True
assert str(lowercase_ ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(lowercase_ ) == i
linked_list.insert_nth(lowercase_ , i + 1 )
assert str(lowercase_ ) == "->".join(str(lowercase_ ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(lowercase_ ) == "->".join(str(lowercase_ ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(lowercase_ ) == 9
assert str(lowercase_ ) == "->".join(str(lowercase_ ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
A__ = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(lowercase_ ) == "->".join(str(lowercase_ ) for i in range(-8 , 1 ) )
def _SCREAMING_SNAKE_CASE ( ) -> None:
A__ = [
-9,
1_00,
Node(77_34_51_12 ),
"dlrow olleH",
7,
55_55,
0,
-1_9_2.5_5_5_5_5,
"Hello, world!",
7_7.9,
Node(10 ),
None,
None,
1_2.2_0,
]
A__ = LinkedList()
for i in test_input:
linked_list.insert_tail(lowercase_ )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(lowercase_ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
A__ = linked_list.delete_head()
assert result == -9
assert (
str(lowercase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
A__ = linked_list.delete_tail()
assert result == 1_2.2
assert (
str(lowercase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
A__ = linked_list.delete_nth(10 )
assert result is None
assert (
str(lowercase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("Hello again, world!" ) )
assert (
str(lowercase_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(lowercase_ )
assert (
str(lowercase_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(lowercase_ )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def _SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
from doctest import testmod
testmod()
A__ = LinkedList()
linked_list.insert_head(input("Inserting 1st at head " ).strip() )
linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
print("\nDelete head" )
linked_list.delete_head()
print("Delete tail" )
linked_list.delete_tail()
print("\nPrint list:" )
linked_list.print_list()
print("\nReverse linked list" )
linked_list.reverse()
print("\nPrint list:" )
linked_list.print_list()
print("\nString representation of linked list:" )
print(lowercase_ )
print("\nReading/changing Node data using indexing:" )
print(f"""Element at Position 1: {linked_list[1]}""" )
A__ = input("Enter New Value: " ).strip()
print("New list:" )
print(lowercase_ )
print(f"""length of linked_list is : {len(lowercase_ )}""" )
if __name__ == "__main__":
main()
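

# Complexity note (added; not part of the original file): insert_tail and
# delete_tail are O(n) because insert_nth/delete_nth re-walk the list from the
# head; keeping a tail pointer in LinkedList would make appends O(1).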
| 230 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mvp_fast'] = ['MvpTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mvp'] = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
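
# Note (added; not part of the original file): this is the standard transformers
# lazy-import pattern; submodules are imported only when an attribute is first
# accessed, which keeps `import transformers` fast.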
| 199 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager to hide the terminal cursor."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
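

# Hypothetical usage sketch (added; not part of the original module):
if __name__ == "__main__":
    import time

    with hide():  # the cursor is restored even if the body raises
        time.sleep(1)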
| 199 | 1 |
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Get the Next Greatest Element (NGE) for each element of the array by
    scanning all subsequent elements: O(n^2)."""
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like next_greatest_element_slow(), but uses enumerate() for the outer
    loop and a slice of arr for the inner loop."""
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """Get the NGE for each element in a single pass with a monotonic stack: O(n)."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
"""from __main__ import arr, next_greatest_element_slow, """
"""next_greatest_element_fast, next_greatest_element"""
)
print(
"""next_greatest_element_slow():""",
timeit("""next_greatest_element_slow(arr)""", setup=setup),
)
print(
"""next_greatest_element_fast():""",
timeit("""next_greatest_element_fast(arr)""", setup=setup),
)
print(
""" next_greatest_element():""",
timeit("""next_greatest_element(arr)""", setup=setup),
)
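
    # Added sanity check (not in the original file): all three implementations
    # should agree with the expected output defined at the top of the module.
    for func in (next_greatest_element_slow, next_greatest_element_fast, next_greatest_element):
        assert func(arr) == expect, func.__name__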
| 330 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
    tokenizer = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
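
# Round-trip sketch (added; not part of the original script): reloading the saved
# artifacts is a quick way to confirm they are self-consistent.
# reloaded_tokenizer = FSMTTokenizer.from_pretrained(mname_tiny)
# reloaded_model = FSMTForConditionalGeneration.from_pretrained(mname_tiny)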
| 330 | 1 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
                f" {self.sequence_state_dim} and {self.sequence_state_dim}."
            )
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_state_dim}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
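

# Minimal usage sketch (added; not part of the original module): constructing a
# folding config exercises the nested defaults above, building EsmFoldConfig ->
# TrunkConfig -> StructureModuleConfig when they are omitted.
if __name__ == "__main__":
    config = EsmConfig(vocab_size=33, is_folding_model=True)
    print(type(config.esmfold_config).__name__)  # EsmFoldConfig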
| 95 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """Image-to-text pipeline: predicts a caption for a given image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
        # pipeline will group them into a list of `None`, which fails `_forward`. Avoid that by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
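

# Hypothetical usage sketch (added; the checkpoint name is an assumption, not
# something this file prescribes):
# from transformers import pipeline
# captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
# print(captioner("path/or/url/to/image.png"))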
| 227 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 354 |
"""simple docstring"""
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'''{solution() = }''')
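
    # Added sanity check (not in the original file): Project Euler 116 lists 7 red,
    # 3 green and 2 blue tilings for a row of length five, i.e. 12 in total.
    assert solution(5) == 12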
| 2 | 0 |
"""simple docstring"""
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Class to contain the entire pipeline for SHA-1 hashing a bytestring."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        # Left-rotate the 32-bit integer n by b bits.
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        # Pad the data with a 0x80 byte, zero bytes and the 64-bit message length.
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        # Split the padded data into 64-byte blocks.
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        # Expand a 64-byte block into eighty 32-bit words.
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
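
    # Extra self-check (added; not in the original file): compare against hashlib on
    # an input long enough to exercise multi-block hashing (100 bytes pad to 128).
    long_msg = b"a" * 100
    assert SHA1Hash(long_msg).final_hash() == hashlib.sha1(long_msg).hexdigest()  # noqa: S324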
| 269 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    r"""Constructs a MusicGen processor which wraps an EnCodec feature extractor and a T5 tokenizer into a single
    processor."""

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)

        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None):
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
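

# Hypothetical usage sketch (added; the checkpoint name is an assumption):
# from transformers import AutoProcessor
# processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
# inputs = processor(text=["80s pop track with bassy drums"], padding=True, return_tensors="pt")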
| 156 | 0 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores
    def test_temperature_dist_warper(self):
'''simple docstring'''
lowercase : List[Any] = None
lowercase : Dict = 20
lowercase : Dict = self._get_uniform_logits(batch_size=2 ,length=snake_case )
# tweak scores to not be uniform anymore
lowercase : Optional[int] = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowercase : Tuple = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowercase : List[str] = jax.nn.softmax(snake_case ,axis=-1 )
lowercase : Any = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowercase : Optional[int] = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowercase : Optional[int] = jax.nn.softmax(temp_dist_warper_sharper(snake_case ,scores.copy() ,cur_len=snake_case ) ,axis=-1 )
lowercase : Optional[int] = jax.nn.softmax(temp_dist_warper_smoother(snake_case ,scores.copy() ,cur_len=snake_case ) ,axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() )
    def test_top_k_dist_warper(self):
'''simple docstring'''
lowercase : Tuple = None
lowercase : List[Any] = 10
lowercase : Any = 2
# create ramp distribution
lowercase : List[str] = np.broadcast_to(np.arange(snake_case )[None, :] ,(batch_size, vocab_size) ).copy()
lowercase : Union[str, Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowercase : Optional[Any] = FlaxTopKLogitsWarper(3 )
lowercase : List[str] = top_k_warp(snake_case ,snake_case ,cur_len=snake_case )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowercase : int = 5
lowercase : Dict = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 )
lowercase : str = np.broadcast_to(np.arange(snake_case )[None, :] ,(batch_size, length) ).copy()
lowercase : int = top_k_warp_safety_check(snake_case ,snake_case ,cur_len=snake_case )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] )
    def test_top_p_dist_warper(self):
'''simple docstring'''
lowercase : Any = None
lowercase : Dict = 10
lowercase : Union[str, Any] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowercase : Optional[int] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowercase : Optional[int] = FlaxTopPLogitsWarper(0.8 )
lowercase : Dict = np.exp(top_p_warp(snake_case ,snake_case ,cur_len=snake_case ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowercase : Any = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(snake_case ,snake_case ,atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowercase : Dict = np.broadcast_to(np.arange(snake_case )[None, :] ,(batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowercase : Optional[int] = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
lowercase : List[Any] = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 )
lowercase : Dict = top_p_warp(snake_case ,snake_case ,cur_len=snake_case )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] )
    def test_min_length_dist_processor(self):
'''simple docstring'''
lowercase : Union[str, Any] = 20
lowercase : Any = 4
lowercase : List[str] = 0
lowercase : Any = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=snake_case )
# check that min length is applied at length 5
lowercase : Optional[Any] = ids_tensor((batch_size, 20) ,vocab_size=20 )
lowercase : Tuple = 5
lowercase : Optional[Any] = self._get_uniform_logits(snake_case ,snake_case )
lowercase : str = min_dist_processor(snake_case ,snake_case ,cur_len=snake_case )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
lowercase : Union[str, Any] = self._get_uniform_logits(snake_case ,snake_case )
lowercase : List[Any] = 15
lowercase : str = min_dist_processor(snake_case ,snake_case ,cur_len=snake_case )
self.assertFalse(jnp.isinf(snake_case ).any() )
    def test_forced_bos_token_logits_processor(self):
'''simple docstring'''
lowercase : Union[str, Any] = 20
lowercase : str = 4
lowercase : Dict = 0
lowercase : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=snake_case )
# check that all scores are -inf except the bos_token_id score
lowercase : Any = ids_tensor((batch_size, 1) ,vocab_size=20 )
lowercase : Dict = 1
lowercase : Tuple = self._get_uniform_logits(snake_case ,snake_case )
lowercase : Any = logits_processor(snake_case ,snake_case ,cur_len=snake_case )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
lowercase : int = 3
lowercase : List[Any] = self._get_uniform_logits(snake_case ,snake_case )
lowercase : List[Any] = logits_processor(snake_case ,snake_case ,cur_len=snake_case )
self.assertFalse(jnp.isinf(snake_case ).any() )
    def test_forced_eos_token_logits_processor(self):
'''simple docstring'''
lowercase : str = 20
lowercase : str = 4
lowercase : Dict = 0
lowercase : Any = 5
lowercase : List[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=snake_case ,eos_token_id=snake_case )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowercase : Any = ids_tensor((batch_size, 4) ,vocab_size=20 )
lowercase : Any = 4
lowercase : Tuple = self._get_uniform_logits(snake_case ,snake_case )
lowercase : List[str] = logits_processor(snake_case ,snake_case ,cur_len=snake_case )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowercase : Dict = 3
lowercase : int = self._get_uniform_logits(snake_case ,snake_case )
lowercase : Dict = logits_processor(snake_case ,snake_case ,cur_len=snake_case )
self.assertFalse(jnp.isinf(snake_case ).any() )
    def test_processor_list(self):
'''simple docstring'''
lowercase : Any = 4
lowercase : Union[str, Any] = 10
lowercase : List[Any] = 15
lowercase : Any = 2
lowercase : Tuple = 1
lowercase : Union[str, Any] = 15
# dummy input_ids and scores
lowercase : str = ids_tensor((batch_size, sequence_length) ,snake_case )
lowercase : str = input_ids.copy()
lowercase : Any = self._get_uniform_logits(snake_case ,snake_case )
lowercase : Any = scores.copy()
# instantiate all dist processors
lowercase : str = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowercase : Union[str, Any] = FlaxTopKLogitsWarper(3 )
lowercase : List[Any] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowercase : List[Any] = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=snake_case )
lowercase : str = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=snake_case )
lowercase : Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=snake_case ,eos_token_id=snake_case )
lowercase : Optional[Any] = 10
# no processor list
lowercase : List[str] = temp_dist_warp(snake_case ,snake_case ,cur_len=snake_case )
lowercase : Optional[Any] = top_k_warp(snake_case ,snake_case ,cur_len=snake_case )
lowercase : str = top_p_warp(snake_case ,snake_case ,cur_len=snake_case )
lowercase : List[Any] = min_dist_proc(snake_case ,snake_case ,cur_len=snake_case )
lowercase : List[Any] = bos_dist_proc(snake_case ,snake_case ,cur_len=snake_case )
lowercase : Optional[Any] = eos_dist_proc(snake_case ,snake_case ,cur_len=snake_case )
# with processor list
lowercase : List[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowercase : int = processor(snake_case ,snake_case ,cur_len=snake_case )
# scores should be equal
self.assertTrue(jnp.allclose(snake_case ,snake_case ,atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
    def test_processor_list_jitted(self):
'''simple docstring'''
lowercase : Dict = 4
lowercase : int = 10
lowercase : List[Any] = 15
lowercase : Tuple = 2
lowercase : Optional[int] = 1
lowercase : Any = 15
# dummy input_ids and scores
lowercase : int = ids_tensor((batch_size, sequence_length) ,snake_case )
lowercase : Dict = input_ids.copy()
lowercase : int = self._get_uniform_logits(snake_case ,snake_case )
lowercase : Optional[Any] = scores.copy()
# instantiate all dist processors
lowercase : Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowercase : Dict = FlaxTopKLogitsWarper(3 )
lowercase : Optional[Any] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowercase : Tuple = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=snake_case )
lowercase : List[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=snake_case )
lowercase : int = FlaxForcedEOSTokenLogitsProcessor(max_length=snake_case ,eos_token_id=snake_case )
lowercase : Optional[Any] = 10
# no processor list
def run_no_processor_list(snake_case ,snake_case ,snake_case ):
lowercase : Optional[Any] = temp_dist_warp(snake_case ,snake_case ,cur_len=snake_case )
lowercase : Optional[Any] = top_k_warp(snake_case ,snake_case ,cur_len=snake_case )
lowercase : List[str] = top_p_warp(snake_case ,snake_case ,cur_len=snake_case )
lowercase : Union[str, Any] = min_dist_proc(snake_case ,snake_case ,cur_len=snake_case )
lowercase : List[str] = bos_dist_proc(snake_case ,snake_case ,cur_len=snake_case )
lowercase : Any = eos_dist_proc(snake_case ,snake_case ,cur_len=snake_case )
return scores
# with processor list
def run_processor_list(snake_case ,snake_case ,snake_case ):
lowercase : int = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowercase : Union[str, Any] = processor(snake_case ,snake_case ,cur_len=snake_case )
return scores
lowercase : List[Any] = jax.jit(snake_case )
lowercase : Dict = jax.jit(snake_case )
lowercase : Tuple = jitted_run_no_processor_list(snake_case ,snake_case ,snake_case )
lowercase : Any = jitted_run_processor_list(snake_case ,snake_case ,snake_case )
# scores should be equal
self.assertTrue(jnp.allclose(snake_case ,snake_case ,atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
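
# Note (added; not part of the original tests): the two processor-list tests above
# assert that applying the warpers/processors one by one, via
# FlaxLogitsProcessorList, and under jax.jit all yield identical scores, and that
# input_ids are never mutated.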
| 370 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
        default="groupvit-gcc-yfcc",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 285 | 0 |
'''simple docstring'''
import base64


def base85_encode(string: str) -> bytes:
    # encode the input string to bytes, then Ascii85-encode those bytes
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    # Ascii85-decode the input into bytes and decode that into a readable string
    return base64.a85decode(a85encoded).decode("utf-8")
if __name__ == "__main__":
import doctest
doctest.testmod()
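
    # Example round-trip (added; not in the original file):
    sample = "Hello, Base85!"
    assert base85_decode(base85_encode(sample)) == sample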
| 265 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 230 | 0 |
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]) -> None:
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )

            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
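
    # Added sanity check (not in the original file): both searches must end at the
    # goal cell when a path exists on this grid.
    assert path is not None and path[-1] == goal
    assert bd_path is not None and bd_path[-1] == goal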
| 367 |
"""simple docstring"""
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue

        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
a = Process('''P1''', 0, 53)
a = Process('''P2''', 0, 17)
a = Process('''P3''', 0, 68)
a = Process('''P4''', 0, 24)
a = 3
a = [17, 25]
a = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
a = Process('''P1''', 0, 53)
a = Process('''P2''', 0, 17)
a = Process('''P3''', 0, 68)
a = Process('''P4''', 0, 24)
a = 3
a = [17, 25]
a = deque([Pa, Pa, Pa, Pa])
a = MLFQ(number_of_queues, time_slices, queue, 0)
a = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'''waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'''completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'''turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print sequence of finished processes
print(
F'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
| 271 | 0 |
import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    """Bifid cipher over a 5x5 Polybius square ('j' is folded into 'i')."""

    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-based (row, column) coordinates of ``letter``."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the 1-based (row, column) coordinates."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        # write the coordinates as two rows: all row indices, then all columns
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        # read the flattened coordinates back two at a time
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")

        # collect coordinate pairs into one flat array
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        # split the flat array back into a row line and a column line
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
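

# A minimal usage sketch (not part of the original file): round-trips a message
# through the cipher defined above; spaces are removed and "j" maps to "i".
if __name__ == "__main__":
    cipher = BifidCipher()
    secret = cipher.encode("test message")
    print(secret)
    print(cipher.decode(secret))  # -> "testmessage"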
| 330 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
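

# A minimal usage sketch (not part of the original file). "image.png" is a
# placeholder path; any RGB image works, and Pillow must be installed.
if __name__ == "__main__":
    from PIL import Image

    processor = CLIPImageProcessor()
    pixel_values = processor(Image.open("image.png"), return_tensors="np")["pixel_values"]
    print(pixel_values.shape)  # (1, 3, 224, 224) with the default size/crop settings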
| 330 | 1 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)


def prepare_video():
    """Downloads a short test video from the Hub and returns it as a list of frames."""
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)


@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 330 |
def stooge_sort(arr):
    """Sorts ``arr`` in place using stooge sort and returns it."""
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    if i >= h:
        return

    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3

        # Recursively sort first 2/3 elements
        stooge(arr, i, (h - t))

        # Recursively sort last 2/3 elements
        stooge(arr, i + t, (h))

        # Recursively sort first 2/3 elements
        stooge(arr, i, (h - t))


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
| 330 | 1 |
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
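

# A minimal usage sketch (not part of the original tests): unconditional sampling
# with the same pretrained checkpoint the integration test uses above.
if __name__ == "__main__":
    pipe = PNDMPipeline(unet=UNet2DModel.from_pretrained("google/ddpm-cifar10-32"), scheduler=PNDMScheduler())
    output = pipe(generator=torch.manual_seed(0), num_inference_steps=50)
    output.images[0].save("pndm_sample.png")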
| 49 |
import logging
import os
from typing import List, TextIO, Union

from conllu import parse_incr

from utils_ner import InputExample, Split, TokenClassificationTask


logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for the NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class Chunk(NER):
    def __init__(self):
        # in the CoNLL-2003 dataset the chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]


class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []

        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
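

# A minimal usage sketch (not part of the original file). It assumes a
# CoNLL-formatted `train.txt` under `./data`; the label call needs no files.
if __name__ == "__main__":
    task = NER()
    print(task.get_labels(path=""))  # falls back to the default CoNLL-2003 label set
    examples = task.read_examples_from_file("./data", Split.train)
    print(f"read {len(examples)} examples")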
| 2 | 0 |
from __future__ import annotations

from scipy.special import comb  # type: ignore


class BezierCurve:
    """Bezier curve defined by a list of 2D control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # Bernstein basis polynomial for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
| 75 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)

        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
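

# A minimal usage sketch (not part of the original tests): sampling one image from
# the checkpoint used above. Few steps keep it fast but lower the sample quality.
if __name__ == "__main__":
    pipeline = ScoreSdeVePipeline(
        unet=UNet2DModel.from_pretrained("google/ncsnpp-church-256"),
        scheduler=ScoreSdeVeScheduler.from_pretrained("google/ncsnpp-church-256"),
    )
    result = pipeline(num_inference_steps=10, generator=torch.manual_seed(0))
    result.images[0].save("sde_ve_church.png")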
| 75 | 1 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    """Estimate pi by sampling uniform points in the unit square."""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area of the circle to the square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
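
    # Demo runs (not in the original file): results vary between runs because the
    # estimators draw random samples.
    pi_estimator(100_000)
    area_under_line_estimator_check(100_000)
    pi_estimator_using_area_under_curve(100_000)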
| 281 |
from __future__ import annotations


def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number via binary search."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1, which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three counting functions against each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 285 | 0 |
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__UpperCamelCase : Dict = '''true'''
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : Tuple=82 , _UpperCAmelCase : Tuple=16 ):
set_seed(42 )
lowerCAmelCase = RegressionModel()
lowerCAmelCase = deepcopy(_UpperCAmelCase )
lowerCAmelCase = RegressionDataset(length=_UpperCAmelCase )
lowerCAmelCase = DataLoader(_UpperCAmelCase , batch_size=_UpperCAmelCase )
model.to(accelerator.device )
lowerCAmelCase ,lowerCAmelCase = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase )
return model, ddp_model, dataloader
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Accelerator , _UpperCAmelCase : Dict=False ):
lowerCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
lowerCAmelCase = load_dataset('glue' , 'mrpc' , split='validation' )
def tokenize_function(_UpperCAmelCase : Dict ):
lowerCAmelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase )
return outputs
with accelerator.main_process_first():
lowerCAmelCase = dataset.map(
_UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , )
lowerCAmelCase = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_UpperCAmelCase : int ):
if use_longest:
return tokenizer.pad(_UpperCAmelCase , padding='longest' , return_tensors='pt' )
return tokenizer.pad(_UpperCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt' )
return DataLoader(_UpperCAmelCase , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=16 )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] ):
lowerCAmelCase = Accelerator(dispatch_batches=_UpperCAmelCase , split_batches=_UpperCAmelCase )
lowerCAmelCase = get_dataloader(_UpperCAmelCase , not dispatch_batches )
lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=_UpperCAmelCase )
lowerCAmelCase ,lowerCAmelCase = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int ):
lowerCAmelCase = []
for batch in dataloader:
lowerCAmelCase ,lowerCAmelCase = batch.values()
with torch.no_grad():
lowerCAmelCase = model(_UpperCAmelCase )
lowerCAmelCase ,lowerCAmelCase = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
lowerCAmelCase ,lowerCAmelCase = [], []
for logit, targ in logits_and_targets:
logits.append(_UpperCAmelCase )
targs.append(_UpperCAmelCase )
lowerCAmelCase ,lowerCAmelCase = torch.cat(_UpperCAmelCase ), torch.cat(_UpperCAmelCase )
return logits, targs
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Accelerator , _UpperCAmelCase : str=82 , _UpperCAmelCase : List[str]=False , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : Any=16 ):
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = get_basic_setup(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase ,lowerCAmelCase = generate_predictions(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
assert (
len(_UpperCAmelCase ) == num_samples
), F'Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_UpperCAmelCase )}'
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False ):
lowerCAmelCase = evaluate.load('glue' , 'mrpc' )
lowerCAmelCase ,lowerCAmelCase = get_mrpc_setup(_UpperCAmelCase , _UpperCAmelCase )
# First do baseline
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = setup['no']
model.to(_UpperCAmelCase )
model.eval()
for batch in dataloader:
batch.to(_UpperCAmelCase )
with torch.inference_mode():
lowerCAmelCase = model(**_UpperCAmelCase )
lowerCAmelCase = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=_UpperCAmelCase , references=batch['labels'] )
lowerCAmelCase = metric.compute()
# Then do distributed
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
lowerCAmelCase = model(**_UpperCAmelCase )
lowerCAmelCase = outputs.logits.argmax(dim=-1 )
lowerCAmelCase = batch['labels']
lowerCAmelCase ,lowerCAmelCase = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=_UpperCAmelCase , references=_UpperCAmelCase )
lowerCAmelCase = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = Accelerator(split_batches=_UpperCAmelCase , dispatch_batches=_UpperCAmelCase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`' )
test_mrpc(_UpperCAmelCase , _UpperCAmelCase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
lowerCAmelCase = Accelerator(split_batches=_UpperCAmelCase , dispatch_batches=_UpperCAmelCase )
if accelerator.is_local_main_process:
print(F'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99' )
test_torch_metrics(_UpperCAmelCase , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
lowerCAmelCase = Accelerator()
test_torch_metrics(_UpperCAmelCase , 512 )
accelerator.state._reset_state()
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 309 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }


@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 309 | 1 |
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Tuple = logging.get_logger(__name__)
lowercase__ : str = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = '''efficientformer'''
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[int] = [3, 2, 6, 4] , SCREAMING_SNAKE_CASE_ : List[int] = [4_8, 9_6, 2_2_4, 4_4_8] , SCREAMING_SNAKE_CASE_ : List[bool] = [True, True, True, True] , SCREAMING_SNAKE_CASE_ : int = 4_4_8 , SCREAMING_SNAKE_CASE_ : int = 3_2 , SCREAMING_SNAKE_CASE_ : int = 4 , SCREAMING_SNAKE_CASE_ : int = 7 , SCREAMING_SNAKE_CASE_ : int = 5 , SCREAMING_SNAKE_CASE_ : int = 8 , SCREAMING_SNAKE_CASE_ : int = 4 , SCREAMING_SNAKE_CASE_ : float = 0.0 , SCREAMING_SNAKE_CASE_ : int = 1_6 , SCREAMING_SNAKE_CASE_ : int = 3 , SCREAMING_SNAKE_CASE_ : int = 3 , SCREAMING_SNAKE_CASE_ : int = 3 , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : float = 0.0 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : float = 1E-5 , SCREAMING_SNAKE_CASE_ : str = "gelu" , SCREAMING_SNAKE_CASE_ : float = 0.02 , SCREAMING_SNAKE_CASE_ : float = 1E-12 , SCREAMING_SNAKE_CASE_ : int = 2_2_4 , SCREAMING_SNAKE_CASE_ : float = 1E-05 , **SCREAMING_SNAKE_CASE_ : Union[str, Any] , ):
super().__init__(**_a )
lowerCAmelCase_ : Union[str, Any] = hidden_act
lowerCAmelCase_ : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase_ : Optional[int] = hidden_sizes
lowerCAmelCase_ : int = num_hidden_layers
lowerCAmelCase_ : Optional[int] = num_attention_heads
lowerCAmelCase_ : str = initializer_range
lowerCAmelCase_ : List[str] = layer_norm_eps
lowerCAmelCase_ : List[Any] = patch_size
lowerCAmelCase_ : Optional[int] = num_channels
lowerCAmelCase_ : List[str] = depths
lowerCAmelCase_ : int = mlp_expansion_ratio
lowerCAmelCase_ : List[Any] = downsamples
lowerCAmelCase_ : Any = dim
lowerCAmelCase_ : Tuple = key_dim
lowerCAmelCase_ : Union[str, Any] = attention_ratio
lowerCAmelCase_ : Union[str, Any] = resolution
lowerCAmelCase_ : Union[str, Any] = pool_size
lowerCAmelCase_ : Tuple = downsample_patch_size
lowerCAmelCase_ : Tuple = downsample_stride
lowerCAmelCase_ : str = downsample_pad
lowerCAmelCase_ : str = drop_path_rate
lowerCAmelCase_ : List[str] = num_metaad_blocks
lowerCAmelCase_ : Any = distillation
lowerCAmelCase_ : Any = use_layer_scale
lowerCAmelCase_ : Optional[Any] = layer_scale_init_value
lowerCAmelCase_ : str = image_size
lowerCAmelCase_ : List[str] = batch_norm_eps
| 224 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : int ,_a : Any ,_a : Optional[int]=2 ,_a : Optional[Any]=True ,_a : Dict=False ,_a : Dict=10 ,_a : Any=3 ,_a : str=32 * 8 ,_a : Optional[int]=32 * 8 ,_a : int=4 ,_a : str=64 ,):
'''simple docstring'''
_a : Dict = parent
_a : Union[str, Any] = batch_size
_a : Tuple = is_training
_a : List[str] = use_auxiliary_loss
_a : Optional[Any] = num_queries
_a : str = num_channels
_a : List[str] = min_size
_a : int = max_size
_a : Optional[int] = num_labels
_a : List[str] = hidden_dim
_a : int = hidden_dim
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_a )
_a : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=_a )
_a : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=_a ) > 0.5
).float()
_a : Tuple = (torch.rand((self.batch_size, self.num_labels) ,device=_a ) > 0.5).long()
_a : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : int = MaskaFormerConfig(
hidden_size=self.hidden_dim ,)
_a : str = self.num_queries
_a : Union[str, Any] = self.num_labels
_a : Tuple = [1, 1, 1, 1]
_a : Dict = self.num_channels
_a : str = 64
_a : Tuple = 128
_a : Optional[Any] = self.hidden_dim
_a : Union[str, Any] = self.hidden_dim
_a : List[Any] = self.hidden_dim
return config
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a, _a, _a, _a, _a : Optional[Any] = self.prepare_config_and_inputs()
_a : str = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def __lowercase ( self : List[str] ,_a : Optional[Any] ,_a : str ):
'''simple docstring'''
_a : str = output.encoder_hidden_states
_a : Any = output.pixel_decoder_hidden_states
_a : Optional[Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_a ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) ,config.decoder_layers )
def __lowercase ( self : List[str] ,_a : str ,_a : List[Any] ,_a : Any ,_a : Union[str, Any]=False ):
'''simple docstring'''
with torch.no_grad():
_a : str = MaskaFormerModel(config=_a )
model.to(_a )
model.eval()
_a : Any = model(pixel_values=_a ,pixel_mask=_a )
_a : Optional[Any] = model(_a ,output_hidden_states=_a )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,)
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_a ,_a )
def __lowercase ( self : Tuple ,_a : List[Any] ,_a : Union[str, Any] ,_a : Tuple ,_a : List[str] ,_a : Any ):
'''simple docstring'''
_a : int = MaskaFormerForUniversalSegmentation(config=_a )
model.to(_a )
model.eval()
def comm_check_on_output(_a : Any ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_a : Any = model(pixel_values=_a ,pixel_mask=_a )
_a : Optional[int] = model(_a )
comm_check_on_output(_a )
_a : List[str] = model(
pixel_values=_a ,pixel_mask=_a ,mask_labels=_a ,class_labels=_a )
comm_check_on_output(_a )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
__UpperCAmelCase : Dict = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
__UpperCAmelCase : Dict = False
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Dict = False
__UpperCAmelCase : List[Any] = False
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Union[str, Any] = MaskaFormerModelTester(self )
_a : Dict = ConfigTester(self ,config_class=_a ,has_text_modality=_a )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a, _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_a ,**_a ,output_hidden_states=_a )
def __lowercase ( self : str ):
'''simple docstring'''
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_a )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def __lowercase ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def __lowercase ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def __lowercase ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
pass
def __lowercase ( self : int ):
'''simple docstring'''
_a, _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Union[str, Any] = model_class(_a )
_a : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Optional[Any] = [*signature.parameters.keys()]
_a : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_a )
@slow
def __lowercase ( self : List[str] ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_a : Dict = MaskaFormerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : int = (self.model_tester.min_size,) * 2
_a : Any = {
'pixel_values': torch.randn((2, 3, *size) ,device=_a ),
'mask_labels': torch.randn((2, 10, *size) ,device=_a ),
'class_labels': torch.zeros(2 ,10 ,device=_a ).long(),
}
_a : List[Any] = self.model_tester.get_config()
_a : int = MaskaFormerForUniversalSegmentation(_a ).to(_a )
_a : str = model(**_a )
self.assertTrue(outputs.loss is not None )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a, _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_a ,**_a ,output_hidden_states=_a )
def __lowercase ( self : int ):
'''simple docstring'''
_a, _a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Any = model_class(_a ).to(_a )
_a : Optional[int] = model(**_a ,output_attentions=_a )
self.assertTrue(outputs.attentions is not None )
def __lowercase ( self : Tuple ):
'''simple docstring'''
if not self.model_tester.is_training:
return
_a : List[str] = self.all_model_classes[1]
_a, _a, _a, _a, _a : List[str] = self.model_tester.prepare_config_and_inputs()
_a : Any = model_class(_a )
model.to(_a )
model.train()
_a : Union[str, Any] = model(_a ,mask_labels=_a ,class_labels=_a ).loss
loss.backward()
def __lowercase ( self : int ):
'''simple docstring'''
_a : int = self.all_model_classes[1]
_a, _a, _a, _a, _a : List[Any] = self.model_tester.prepare_config_and_inputs()
_a : str = True
_a : str = True
_a : List[str] = model_class(_a ).to(_a )
model.train()
_a : Optional[int] = model(_a ,mask_labels=_a ,class_labels=_a )
_a : Tuple = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_a : str = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_a : Dict = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_a : List[str] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_a )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__lowerCAmelCase = 1e-4
def UpperCAmelCase_ ():
"""simple docstring"""
_a : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def __lowercase ( self : Any ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def __lowercase ( self : Any ):
'''simple docstring'''
_a : List[str] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_a )
_a : int = self.default_image_processor
_a : Tuple = prepare_img()
_a : Any = image_processor(_a ,return_tensors='pt' ).to(_a )
_a : Union[str, Any] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a ,(1, 3, 384, 384) )
with torch.no_grad():
_a : Optional[Any] = model(**_a )
_a : List[Any] = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,_a ,atol=_a ) )
_a : str = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,_a ,atol=_a ) )
_a : Any = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,_a ,atol=_a ) )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
_a : Optional[Any] = self.default_image_processor
_a : List[Any] = prepare_img()
_a : str = image_processor(_a ,return_tensors='pt' ).to(_a )
_a : Any = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a ,(1, 3, 384, 384) )
with torch.no_grad():
_a : Optional[int] = model(**_a )
# masks_queries_logits
_a : Dict = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_a : Dict = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
_a : Optional[Any] = torch.tensor(_a ).to(_a )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,_a ,atol=_a ) )
# class_queries_logits
_a : str = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) )
_a : str = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(_a )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,_a ,atol=_a ) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Any = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
_a : Tuple = self.default_image_processor
_a : Tuple = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors='pt' ,)
_a : str = inputs['pixel_values'].to(_a )
_a : str = [el.to(_a ) for el in inputs['mask_labels']]
_a : Dict = [el.to(_a ) for el in inputs['class_labels']]
with torch.no_grad():
_a : List[str] = model(**_a )
self.assertTrue(outputs.loss is not None )
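# Illustrative inference sketch (not part of the tests above; it reuses the same
# checkpoint and fixture image as the slow tests, and
# `post_process_semantic_segmentation` is the image processor's standard
# post-processing hook):
#
#     processor = MaskaFormerImageProcessor.from_pretrained("facebook/mask2former-swin-small-coco-instance")
#     model = MaskaFormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-small-coco-instance").eval()
#     image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#     inputs = processor(image, return_tensors="pt")
#     with torch.no_grad():
#         outputs = model(**inputs)
#     semantic_map = processor.post_process_semantic_segmentation(outputs)[0]  # (height, width) class ids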
| 271 | 0 |
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
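# Quick sanity checks for is_balanced (added for illustration; the expected
# results are easy to verify by hand).
assert is_balanced("{[()]}")
assert is_balanced("")
assert not is_balanced("([)]")
assert not is_balanced("((")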
| 352 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. "
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements that should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # binarize the logits at zero and scale to an 8-bit mask
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
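# Hypothetical usage sketch (not part of the tool itself). PipelineTool.__call__
# chains encode -> forward -> decode, so invoking the tool looks like this;
# "cats.png" is a placeholder path.
#
#     from PIL import Image
#     tool = ImageSegmentationTool()
#     mask = tool(Image.open("cats.png"), "cat")
#     mask.save("cat_mask.png")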
| 206 | 0 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ = BarthezTokenizer
lowerCAmelCase__ = BarthezTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = True
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
__lowerCamelCase = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=__UpperCAmelCase )
__lowerCamelCase = tokenizer
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''<pad>'''
__lowerCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(__UpperCAmelCase ) , 101122 )
def lowerCamelCase ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
__lowerCamelCase = [0, 57, 3018, 70307, 91, 2]
__lowerCamelCase = self.tokenizer(
__UpperCAmelCase , max_length=len(__UpperCAmelCase ) , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__lowerCamelCase = batch.input_ids.tolist()[0]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = self.get_rust_tokenizer()
__lowerCamelCase = '''I was born in 92000, and this is falsé.'''
__lowerCamelCase = tokenizer.tokenize(__UpperCAmelCase )
__lowerCamelCase = rust_tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
__lowerCamelCase = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = self.get_rust_tokenizer()
__lowerCamelCase = tokenizer.encode(__UpperCAmelCase )
__lowerCamelCase = rust_tokenizer.encode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
# fmt: off
__lowerCamelCase = {'''input_ids''': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
__lowerCamelCase = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=__UpperCAmelCase , )
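# Minimal round-trip sketch outside the test class (same checkpoint as above;
# exact token ids depend on the sentencepiece model, so only decoding is shown):
#
#     tok = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
#     ids = tok("Le transformeur est un modèle.").input_ids
#     print(tok.decode(ids, skip_special_tokens=True))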
| 330 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
a_ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
a_ = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def a__ ( _UpperCamelCase : str ,_UpperCamelCase : Tuple ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : Dict ):
for attribute in key.split('''.''' ):
__lowerCamelCase = getattr(_UpperCamelCase ,_UpperCamelCase )
if weight_type is not None:
__lowerCamelCase = getattr(_UpperCamelCase ,_UpperCamelCase ).shape
else:
__lowerCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__lowerCamelCase = value
elif weight_type == "weight_g":
__lowerCamelCase = value
elif weight_type == "weight_v":
__lowerCamelCase = value
elif weight_type == "bias":
__lowerCamelCase = value
else:
__lowerCamelCase = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def a__ ( _UpperCamelCase : Any ,_UpperCamelCase : Any ):
__lowerCamelCase = []
__lowerCamelCase = fairseq_model.state_dict()
__lowerCamelCase = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
__lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,hf_model.config.feat_extract_norm == '''group''' ,)
__lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
__lowerCamelCase = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
# special case since naming is very similar
continue
__lowerCamelCase = True
if "*" in mapped_key:
__lowerCamelCase = name.split(_UpperCamelCase )[0].split('''.''' )[-2]
__lowerCamelCase = mapped_key.replace('''*''' ,_UpperCamelCase )
if "weight_g" in name:
__lowerCamelCase = '''weight_g'''
elif "weight_v" in name:
__lowerCamelCase = '''weight_v'''
elif "bias" in name:
__lowerCamelCase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCamelCase = '''weight'''
else:
__lowerCamelCase = None
set_recursively(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
continue
if not is_used:
unused_weights.append(_UpperCamelCase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def a__ ( _UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Dict ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Union[str, Any] ):
__lowerCamelCase = full_name.split('''conv_layers.''' )[-1]
__lowerCamelCase = name.split('''.''' )
__lowerCamelCase = int(items[0] )
__lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_UpperCamelCase )
@torch.no_grad()
def a__ ( _UpperCamelCase : List[Any] ,_UpperCamelCase : List[str] ,_UpperCamelCase : Tuple=None ,_UpperCamelCase : Tuple=None ,_UpperCamelCase : List[Any]=True ):
if config_path is not None:
__lowerCamelCase = UniSpeechSatConfig.from_pretrained(_UpperCamelCase )
else:
__lowerCamelCase = UniSpeechSatConfig()
__lowerCamelCase = ''''''
if is_finetuned:
__lowerCamelCase = UniSpeechSatForCTC(_UpperCamelCase )
else:
__lowerCamelCase = UniSpeechSatForPreTraining(_UpperCamelCase )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
__lowerCamelCase = model[0].eval()
recursively_load_weights(_UpperCamelCase ,_UpperCamelCase )
hf_wavavec.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
a_ = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
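# Example invocation (the script name and all paths are placeholders; the flags
# match the argparse definitions above):
#
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path ./unispeech_sat.pt \
#       --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf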
| 330 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 88 |
import re
import string
import numpy as np
import datasets
_SCREAMING_SNAKE_CASE = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
_SCREAMING_SNAKE_CASE = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
ignore_numbers: Boolean, defaults to False. If true, removes all numbers before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
_SCREAMING_SNAKE_CASE = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , reference_urls=[] , )
    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        """Compute the exact-match rate between predictions and references."""
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 88 | 1 |
'''simple docstring'''
from __future__ import annotations
a_ : Tuple = list[list[int]]
# assigning initial values to the grid
a_ : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
a_ : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check that ``n`` can be placed at ``grid[row][column]`` without clashing
    with its row, its column, or its 3x3 box."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty (zero) cell, or ``None``."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by backtracking; return it, or ``None`` if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0  # undo the tentative placement and backtrack

    return None


def print_solution(grid: Matrix) -> None:
    """Print the grid as nine space-separated rows."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
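# Illustrative helper (not in the original snippet): verify a completed grid by
# re-checking every cell with is_safe, temporarily clearing the cell so the
# check does not collide with itself.
def is_valid_solution(grid: Matrix) -> bool:
    for i in range(9):
        for j in range(9):
            n = grid[i][j]
            grid[i][j] = 0
            safe = n != 0 and is_safe(grid, i, j, n)
            grid[i][j] = n
            if not safe:
                return False
    return True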
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
a_ : Tuple = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 75 |
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Dict = logging.get_logger(__name__)
a_ : Any = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : List[str] ='efficientformer'
def __init__( self, lowerCAmelCase = [3, 2, 6, 4], lowerCAmelCase = [48, 96, 224, 448], lowerCAmelCase = [True, True, True, True], lowerCAmelCase = 448, lowerCAmelCase = 32, lowerCAmelCase = 4, lowerCAmelCase = 7, lowerCAmelCase = 5, lowerCAmelCase = 8, lowerCAmelCase = 4, lowerCAmelCase = 0.0, lowerCAmelCase = 16, lowerCAmelCase = 3, lowerCAmelCase = 3, lowerCAmelCase = 3, lowerCAmelCase = 2, lowerCAmelCase = 1, lowerCAmelCase = 0.0, lowerCAmelCase = 1, lowerCAmelCase = True, lowerCAmelCase = True, lowerCAmelCase = 1e-5, lowerCAmelCase = "gelu", lowerCAmelCase = 0.0_2, lowerCAmelCase = 1e-12, lowerCAmelCase = 224, lowerCAmelCase = 1e-05, **lowerCAmelCase, ):
"""simple docstring"""
super().__init__(**lowerCAmelCase )
lowerCamelCase_ =hidden_act
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =hidden_sizes
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =initializer_range
lowerCamelCase_ =layer_norm_eps
lowerCamelCase_ =patch_size
lowerCamelCase_ =num_channels
lowerCamelCase_ =depths
lowerCamelCase_ =mlp_expansion_ratio
lowerCamelCase_ =downsamples
lowerCamelCase_ =dim
lowerCamelCase_ =key_dim
lowerCamelCase_ =attention_ratio
lowerCamelCase_ =resolution
lowerCamelCase_ =pool_size
lowerCamelCase_ =downsample_patch_size
lowerCamelCase_ =downsample_stride
lowerCamelCase_ =downsample_pad
lowerCamelCase_ =drop_path_rate
lowerCamelCase_ =num_metaad_blocks
lowerCamelCase_ =distillation
lowerCamelCase_ =use_layer_scale
lowerCamelCase_ =layer_scale_init_value
lowerCamelCase_ =image_size
lowerCamelCase_ =batch_norm_eps
| 75 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
a_ = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
"VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMAEForPreTraining",
"ViTMAELayer",
"ViTMAEModel",
"ViTMAEPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
"TFViTMAEForPreTraining",
"TFViTMAEModel",
"TFViTMAEPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 365 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_x_clip": [
"XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XCLIPConfig",
"XCLIPTextConfig",
"XCLIPVisionConfig",
],
"processing_x_clip": ["XCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
"XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"XCLIPModel",
"XCLIPPreTrainedModel",
"XCLIPTextModel",
"XCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 163 | 0 |
'''simple docstring'''
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class a_ (_a ):
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
super().__init__()
_lowerCAmelCase : List[str] = value_function
_lowerCAmelCase : List[str] = unet
_lowerCAmelCase : Dict = scheduler
_lowerCAmelCase : int = env
_lowerCAmelCase : Union[str, Any] = env.get_dataset()
_lowerCAmelCase : List[Any] = {}
for key in self.data.keys():
try:
_lowerCAmelCase : List[Any] = self.data[key].mean()
except: # noqa: E722
pass
_lowerCAmelCase : int = {}
for key in self.data.keys():
try:
_lowerCAmelCase : Optional[int] = self.data[key].std()
except: # noqa: E722
pass
_lowerCAmelCase : List[Any] = env.observation_space.shape[0]
_lowerCAmelCase : List[str] = env.action_space.shape[0]
def __UpperCamelCase ( self , snake_case_ , snake_case_ ):
return (x_in - self.means[key]) / self.stds[key]
def __UpperCamelCase ( self , snake_case_ , snake_case_ ):
return x_in * self.stds[key] + self.means[key]
def __UpperCamelCase ( self , snake_case_ ):
if type(snake_case_ ) is dict:
return {k: self.to_torch(snake_case_ ) for k, v in x_in.items()}
elif torch.is_tensor(snake_case_ ):
return x_in.to(self.unet.device )
return torch.tensor(snake_case_ , device=self.unet.device )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
for key, val in cond.items():
_lowerCAmelCase : List[Any] = val.clone()
return x_in
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : Optional[Any] = x.shape[0]
_lowerCAmelCase : Optional[int] = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
_lowerCAmelCase : Dict = torch.full((batch_size,) , snake_case_ , device=self.unet.device , dtype=torch.long )
for _ in range(snake_case_ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
_lowerCAmelCase : int = self.value_function(x.permute(0 , 2 , 1 ) , snake_case_ ).sample
_lowerCAmelCase : Any = torch.autograd.grad([y.sum()] , [x] )[0]
_lowerCAmelCase : Any = self.scheduler._get_variance(snake_case_ )
_lowerCAmelCase : Union[str, Any] = torch.exp(0.5 * posterior_variance )
_lowerCAmelCase : str = model_std * grad
_lowerCAmelCase : Optional[Any] = 0
_lowerCAmelCase : Union[str, Any] = x.detach()
_lowerCAmelCase : str = x + scale * grad
_lowerCAmelCase : List[str] = self.reset_xa(snake_case_ , snake_case_ , self.action_dim )
_lowerCAmelCase : str = self.unet(x.permute(0 , 2 , 1 ) , snake_case_ ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
_lowerCAmelCase : Dict = self.scheduler.step(snake_case_ , snake_case_ , snake_case_ , predict_epsilon=snake_case_ )["""prev_sample"""]
# apply conditions to the trajectory (set the initial state)
_lowerCAmelCase : int = self.reset_xa(snake_case_ , snake_case_ , self.action_dim )
_lowerCAmelCase : Optional[Any] = self.to_torch(snake_case_ )
return x, y
def __call__( self , snake_case_ , snake_case_=6_4 , snake_case_=3_2 , snake_case_=2 , snake_case_=0.1 ):
# normalize the observations and create batch dimension
_lowerCAmelCase : int = self.normalize(snake_case_ , """observations""" )
_lowerCAmelCase : Dict = obs[None].repeat(snake_case_ , axis=0 )
_lowerCAmelCase : List[str] = {0: self.to_torch(snake_case_ )}
_lowerCAmelCase : Dict = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
_lowerCAmelCase : Tuple = randn_tensor(snake_case_ , device=self.unet.device )
_lowerCAmelCase : Optional[Any] = self.reset_xa(snake_case_ , snake_case_ , self.action_dim )
_lowerCAmelCase : str = self.to_torch(snake_case_ )
# run the diffusion process
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.run_diffusion(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# sort output trajectories by value
_lowerCAmelCase : Tuple = y.argsort(0 , descending=snake_case_ ).squeeze()
_lowerCAmelCase : List[str] = x[sorted_idx]
_lowerCAmelCase : int = sorted_values[:, :, : self.action_dim]
_lowerCAmelCase : Optional[int] = actions.detach().cpu().numpy()
_lowerCAmelCase : int = self.de_normalize(snake_case_ , key="""actions""" )
# select the action with the highest value
if y is not None:
_lowerCAmelCase : Union[str, Any] = 0
else:
# if we didn't run value guiding, select a random action
_lowerCAmelCase : str = np.random.randint(0 , snake_case_ )
_lowerCAmelCase : Dict = denorm_actions[selected_index, 0]
return denorm_actions
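# Hypothetical control-loop sketch. The original pipeline targets D4RL-style gym
# environments; everything named below (env, value_function, unet, scheduler,
# horizon) is an assumption for illustration, not defined in the file above.
#
#     pipeline = <pipeline class above>(value_function, unet, scheduler, env)
#     obs = env.reset()
#     for _ in range(horizon):
#         action = pipeline(obs)          # plans 32 steps ahead by default
#         obs, reward, done, info = env.step(action)
#         if done:
#             break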
| 309 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _UpperCAmelCase ( ) -> Tuple:
_lowerCAmelCase : List[Any] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
_lowerCAmelCase : int = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert("""RGB""" )
return image
def _UpperCAmelCase ( _lowerCamelCase : Any ) -> Dict:
_lowerCAmelCase : str = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def _UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] ) -> Optional[Any]:
_lowerCAmelCase : str = dct.pop(_lowerCamelCase )
_lowerCAmelCase : str = val
def _UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple ) -> Tuple:
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
_lowerCAmelCase : Tuple = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias' )
_lowerCAmelCase : Optional[Any] = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias' )
# next, set bias in the state dict
_lowerCAmelCase : int = torch.cat((q_bias, torch.zeros_like(_lowerCamelCase , requires_grad=_lowerCamelCase ), v_bias) )
_lowerCAmelCase : str = qkv_bias
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] ) -> List[Any]:
_lowerCAmelCase : str = 3_64 if """coco""" in model_name else 2_24
_lowerCAmelCase : str = BlipaVisionConfig(image_size=_lowerCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
_lowerCAmelCase : int = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=_lowerCamelCase ).to_dict()
elif "opt-6.7b" in model_name:
_lowerCAmelCase : Union[str, Any] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=_lowerCamelCase ).to_dict()
elif "t5-xl" in model_name:
_lowerCAmelCase : Optional[int] = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
_lowerCAmelCase : str = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
_lowerCAmelCase : Dict = BlipaConfig(vision_config=_lowerCamelCase , text_config=_lowerCamelCase )
return config, image_size
@torch.no_grad()
def _UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : List[Any]=None , _lowerCamelCase : int=False ) -> List[str]:
_lowerCAmelCase : int = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
_lowerCAmelCase : List[Any] = tokenizer("""\n""" , add_special_tokens=_lowerCamelCase ).input_ids[0]
_lowerCAmelCase , _lowerCAmelCase : List[str] = get_blipa_config(_lowerCamelCase , eos_token_id=_lowerCamelCase )
_lowerCAmelCase : Optional[int] = BlipaForConditionalGeneration(_lowerCamelCase ).eval()
_lowerCAmelCase : Union[str, Any] = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
_lowerCAmelCase , _lowerCAmelCase : List[str] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
_lowerCAmelCase : Dict = """cuda""" if torch.cuda.is_available() else """cpu"""
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = load_model_and_preprocess(
name=_lowerCamelCase , model_type=_lowerCamelCase , is_eval=_lowerCamelCase , device=_lowerCamelCase )
original_model.eval()
print("""Done!""" )
# update state dict keys
_lowerCAmelCase : List[Any] = original_model.state_dict()
_lowerCAmelCase : Optional[int] = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_lowerCAmelCase : Tuple = state_dict.pop(_lowerCamelCase )
if key.startswith("""Qformer.bert""" ):
_lowerCAmelCase : List[Any] = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
_lowerCAmelCase : Optional[int] = key.replace("""self""" , """attention""" )
if "opt_proj" in key:
_lowerCAmelCase : Dict = key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
_lowerCAmelCase : Tuple = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
_lowerCAmelCase : List[Any] = key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
_lowerCAmelCase : int = key.replace("""t5""" , """language""" )
_lowerCAmelCase : Tuple = val
# read in qv biases
read_in_q_v_bias(state_dict , config )
missing_keys , unexpected_keys = hf_model.load_state_dict(state_dict , strict=False )
assert len(missing_keys ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
image = load_demo_image()
original_pixel_values = vis_processors["""eval"""](image ).unsqueeze(0 ).to(device )
input_ids = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(device )
# create processor
image_processor = BlipImageProcessor(
    size={"""height""": image_size, """width""": image_size} , image_mean=OPENAI_CLIP_MEAN , image_std=OPENAI_CLIP_STD )
processor = BlipaProcessor(image_processor=image_processor , tokenizer=tokenizer )
pixel_values = processor(images=image , return_tensors="""pt""" ).pixel_values.to(device )
# make sure processor creates exact same pixel values
assert torch.allclose(pixel_values , original_pixel_values )
original_model.to(device )
hf_model.to(device )
with torch.no_grad():
    if "opt" in model_name:
        original_logits = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
        logits = hf_model(pixel_values , input_ids ).logits
    else:
        original_logits = original_model(
            {"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
        labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
        logits = hf_model(pixel_values , input_ids , labels=labels ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
_lowerCAmelCase : Any = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=_lowerCamelCase )
assert torch.allclose(logits[0, :3, :3] , _lowerCamelCase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
_lowerCAmelCase : List[Any] = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_lowerCamelCase )
else:
# cast to same type
_lowerCAmelCase : Union[str, Any] = logits.dtype
assert torch.allclose(original_logits.to(_lowerCamelCase ) , _lowerCamelCase , atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
_lowerCAmelCase : Optional[int] = """"""
_lowerCAmelCase : Union[str, Any] = tokenizer(_lowerCamelCase , return_tensors="""pt""" ).input_ids.to(_lowerCamelCase )
_lowerCAmelCase : List[Any] = original_model.generate({"""image""": original_pixel_values} )
_lowerCAmelCase : Dict = hf_model.generate(
_lowerCamelCase , _lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , _lowerCamelCase )
_lowerCAmelCase : int = input_ids.shape[1]
_lowerCAmelCase : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCamelCase )
_lowerCAmelCase : List[str] = [text.strip() for text in output_text]
print("""HF generation:""" , _lowerCamelCase )
if pytorch_dump_folder_path is not None:
    processor.save_pretrained(pytorch_dump_folder_path )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
processor.push_to_hub(f'nielsr/{model_name}' )
hf_model.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
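# A minimal usage sketch (assuming this script is saved as convert_blip_2.py and the
# LAVIS + transformers dependencies are installed); the flags mirror the argparse
# definitions above:
#
#   python convert_blip_2.py --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b --push_to_hub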
| 309 | 1 |
def one_pence( ):
    return 1
def two_pence( x : int ):
    return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def five_pence( x : int ):
    return 0 if x < 0 else five_pence(x - 5 ) + two_pence(x )
def ten_pence( x : int ):
    return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(x )
def twenty_pence( x : int ):
    return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(x )
def fifty_pence( x : int ):
    return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(x )
def one_pound( x : int ):
    return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(x )
def two_pound( x : int ):
    return 0 if x < 0 else two_pound(x - 200 ) + one_pound(x )
def solution( x : int = 200 ):
    return two_pound(x )
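# A hedged alternative sketch (not part of the original script): the same count can be
# computed bottom-up with a one-dimensional dynamic-programming table instead of the
# chained recursion above; `coin_combinations` is a hypothetical helper name.
def coin_combinations(target: int = 200, coins: tuple = (1, 2, 5, 10, 20, 50, 100, 200)) -> int:
    ways = [0] * (target + 1)
    ways[0] = 1  # one way to make 0p: use no coins
    for coin in coins:
        for amount in range(coin, target + 1):
            ways[amount] += ways[amount - coin]
    return ways[target]
# coin_combinations(200) agrees with solution(200) above (73682, the Project Euler 31 answer).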
if __name__ == "__main__":
print(solution(int(input().strip())))
| 286 |
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution( n : str = N ):
    largest_product = -sys.maxsize - 1
    for i in range(len(n ) - 12 ):
        product = 1
        for j in range(13 ):
            product *= int(n[i + j] )
        if product > largest_product:
            largest_product = product
    return largest_product
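# A hedged optimization sketch (not in the original): windows containing a zero digit
# can never hold the largest product, so they can be skipped outright;
# `solution_windowed` is a hypothetical name.
def solution_windowed(n: str = N, span: int = 13) -> int:
    best = 0
    for i in range(len(n) - span + 1):
        window = n[i : i + span]
        if "0" in window:  # any zero makes the whole product zero
            continue
        product = 1
        for digit in window:
            product *= int(digit)
        best = max(best, product)
    return best
# For the 1000-digit N above, solution_windowed() matches solution() (23514624000).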
if __name__ == "__main__":
print(f"{solution() = }")
| 286 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_x_clip''': [
'''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XCLIPConfig''',
'''XCLIPTextConfig''',
'''XCLIPVisionConfig''',
],
'''processing_x_clip''': ['''XCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_x_clip'''] = [
'''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XCLIPModel''',
'''XCLIPPreTrainedModel''',
'''XCLIPTextModel''',
'''XCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
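# A minimal sketch of what the _LazyModule indirection above buys (checkpoint name
# illustrative): importing the package stays cheap, and the heavy submodule is only
# imported when one of its attributes is first touched.
#
#   import transformers                      # does not import modeling_x_clip yet
#   cls = transformers.XCLIPModel            # attribute access resolves the lazy import
#   model = cls.from_pretrained("microsoft/xclip-base-patch32")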
| 41 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file( tokenizer_name , data_dir , max_source_length=10_24 , max_target_length=10_24 , consider_target=False , **kwargs ):
    '''simple docstring'''
    tok = AutoTokenizer.from_pretrained(tokenizer_name )
    train_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="""train""" , **kwargs )
    pad = tok.pad_token_id
    def get_lens(ds ):
        dl = tqdm(
            DataLoader(ds , batch_size=5_12 , num_workers=8 , shuffle=False , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
        max_lens = []
        for batch in dl:
            src_lens = batch["""input_ids"""].ne(pad ).sum(1 ).tolist()
            tgt_lens = batch["""labels"""].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens , tgt_lens ):
                    max_lens.append(max(src , tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens
    train_lens = get_lens(train_ds )
    val_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="""val""" , **kwargs )
    val_lens = get_lens(val_ds )
    pickle_save(train_lens , train_ds.len_file )
    pickle_save(val_lens , val_ds.len_file )
pickle_save(lowerCamelCase__ , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
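# A hedged usage sketch: fire.Fire exposes save_len_file's signature as a CLI, so a
# typical invocation (paths and tokenizer name are placeholders) looks like
#
#   python save_len_file.py --tokenizer_name t5-small --data_dir ./wmt_en_ro \
#       --max_source_length 1024 --max_target_length 1024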
| 206 | 0 |
"""simple docstring"""
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar('''T''')
class GraphAdjacencyList ( Generic[T] ):
    def __init__( self : Any , directed : bool = True ):
        '''simple docstring'''
        self.adj_list : dict[T, list[T]] = {} # dictionary of lists
        self.directed = directed
    def add_edge ( self : Optional[int] , source_vertex : T , destination_vertex : T ):
        '''simple docstring'''
        if not self.directed: # For undirected graphs
            # if both source vertex and destination vertex are present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex].append(source_vertex )
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex )
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else: # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self
def __repr__( self : Any ):
'''simple docstring'''
return pformat(self.adj_list )
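# A brief usage sketch for the class above (values illustrative; add_edge returns self,
# so calls can be chained):
if __name__ == "__main__":
    graph = GraphAdjacencyList[int](directed=False )
    graph.add_edge(0 , 1 ).add_edge(1 , 2 )
    print(graph )  # -> {0: [1], 1: [0, 2], 2: [1]}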
| 296 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=64 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
        return config, input_ids, input_mask, token_labels
    def get_config( self ):
        '''simple docstring'''
        return GPTNeoXConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
    def prepare_config_and_inputs_for_decoder( self ):
        '''simple docstring'''
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model( self , config , input_ids , input_mask ):
        '''simple docstring'''
        model = GPTNeoXModel(config=config )
        model.to(torch_device )
        model.eval()
        _ = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , input_mask ):
        '''simple docstring'''
        config.add_cross_attention = True
        model = GPTNeoXModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self , config , input_ids , input_mask , token_labels ):
        '''simple docstring'''
        model = GPTNeoXForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering( self , config , input_ids , input_mask , token_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self , config , input_ids , input_mask , token_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , input_mask , token_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , input_mask ):
        '''simple docstring'''
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(input_ids , attention_mask=input_mask , use_cache=True )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask , output_hidden_states=True )
        output_from_no_past = output_from_no_past["""hidden_states"""][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )["""hidden_states"""][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = GPTNeoXModelTester(self )
        self.config_tester = ConfigTester(self , config_class=GPTNeoXConfig , hidden_size=64 , num_attention_heads=8 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model( self ):
        '''simple docstring'''
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config , input_ids , input_mask )
    def test_model_as_decoder( self ):
        '''simple docstring'''
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config , input_ids , input_mask )
    def test_model_as_decoder_with_default_input_mask( self ):
        '''simple docstring'''
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config , input_ids , input_mask )
    def test_decoder_model_past_large_inputs( self ):
        '''simple docstring'''
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config , input_ids , input_mask )
    def test_model_for_causal_lm( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs )
    def test_model_for_question_answering( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_model_for_sequence_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_model_for_token_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
    def test_feed_forward_chunking( self ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
    def test_model_rope_scaling( self , scaling_type ):
        '''simple docstring'''
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] , config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"""type""": scaling_type, """factor""": 10.0}
        scaled_model = GPTNeoXModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1e-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1e-5 ) )
@require_torch
class GPTNeoXLanguageGenerationTest ( unittest.TestCase ):
@slow
    def test_lm_generate_gptneox( self ):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device )
            inputs = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(torch_device )
            # The hub repo. was updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""
            output_ids = model.generate(**inputs , do_sample=False , max_new_tokens=20 )
            output_str = tokenizer.batch_decode(output_ids )[0]
            self.assertEqual(output_str , expected_output )
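# Hedged note: these classes follow the standard transformers test layout and are
# collected by pytest; a typical invocation (file path illustrative) is
#
#   python -m pytest tests/models/gpt_neox/test_modeling_gpt_neox.py -k "rope_scaling"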
| 296 | 1 |
import os
SYMBOLS = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
def parse_roman_numerals ( numerals ):
    '''simple docstring'''
    total_value = 0
    index = 0
    while index < len(numerals ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals ( num ):
    '''simple docstring'''
    numerals = """"""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def a__ ( A_ = "/p089_roman.txt" ):
'''simple docstring'''
__magic_name__ = 0
with open(os.path.dirname(A_ ) + roman_numerals_filename ) as filea:
__magic_name__ = filea.readlines()
for line in lines:
__magic_name__ = line.strip()
__magic_name__ = parse_roman_numerals(A_ )
__magic_name__ = generate_roman_numerals(A_ )
savings += len(A_ ) - len(A_ )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
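# A small worked example of the character savings measured above (illustrative):
#
#   parse_roman_numerals("XIIIIII")   -> 16
#   generate_roman_numerals(16)       -> "XVI"
#   len("XIIIIII") - len("XVI")       -> 4 characters saved for that line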
| 88 |
def is_arithmetic_series ( series ):
    '''simple docstring'''
    if not isinstance(series , list ):
        raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
    if len(series ) == 0:
        raise ValueError("""Input list must be a non empty list""" )
    if len(series ) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series ) - 1 ):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean ( series ):
    '''simple docstring'''
    if not isinstance(series , list ):
        raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
    if len(series ) == 0:
        raise ValueError("""Input list must be a non empty list""" )
    answer = 0
    for val in series:
        answer += val
    return answer / len(series )
if __name__ == "__main__":
import doctest
doctest.testmod()
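# Illustrative calls for the two helpers above (doctest.testmod() is a no-op here
# because the functions carry no doctests):
#
#   is_arithmetic_series([2, 4, 6])   -> True   (common difference 2)
#   is_arithmetic_series([2, 4, 7])   -> False
#   arithmetic_mean([2, 4, 6])        -> 4.0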
| 88 | 1 |
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = """https://api.github.com"""
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + """/user"""
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("""USER_TOKEN""", """""")
def fetch_github_info ( auth_token : str ) ->dict[Any, Any]:
    """simple docstring"""
    headers = {
        '''Authorization''': f"""token {auth_token}""",
        '''Accept''': '''application/vnd.github.v3+json''',
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT , headers=headers ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'{key}: {value}')
else:
raise ValueError("""'USER_TOKEN' field cannot be empty.""")
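# A hedged usage sketch (module file name illustrative): export a GitHub personal
# access token, then run the script directly; fetch_github_info returns the decoded
# JSON for the authenticated /user endpoint.
#
#   USER_TOKEN=ghp_yourtokenhere python fetch_github_info.py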
| 24 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['transformers', 'torch', 'note_seq']
def __init__(self , *a_ , **a_ ):
'''simple docstring'''
requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
    def from_config (cls , *a_ , **a_ ):
'''simple docstring'''
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
    def from_pretrained (cls , *a_ , **a_ ):
'''simple docstring'''
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
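# A brief behavior sketch (assuming the standard requires_backends helper): because the
# DummyObject metaclass, __init__, from_config, and from_pretrained all call
# requires_backends, touching the dummy without transformers, torch, and note_seq
# installed raises an ImportError that names the missing backends instead of failing
# later with an opaque AttributeError.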
| 24 | 1 |
def harmonic_series ( n_term : str ):
    if n_term == "":
        return []
    series = []
    for temp in range(int(n_term ) ):
        series.append(F"""1/{temp + 1}""" if series else '1' )
    return series
if __name__ == "__main__":
    nth_term = input('Enter the last number (nth term) of the Harmonic Series')
    print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
    print(harmonic_series(nth_term))
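# Example session (illustrative):
#
#   Enter the last number (nth term) of the Harmonic Series 4
#   Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n
#   ['1', '1/2', '1/3', '1/4']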
| 284 |
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close ( source , target ):
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def test_test_command ( dataset_dir ):
    args = _TestCommandArgs(dataset=dataset_dir , all_configs=True , save_infos=True )
    test_command = TestCommand(*args )
    test_command.run()
    readme_path = os.path.join(dataset_dir , """README.md""" )
    assert os.path.exists(readme_path )
    dataset_infos = DatasetInfosDict.from_directory(dataset_dir )
    expected_dataset_infos = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ) , splits=[
{
"""name""": """train""",
"""num_bytes""": 2_3_5_1_5_6_3,
"""num_examples""": 1_0_0_0_0,
},
{
"""name""": """validation""",
"""num_bytes""": 2_3_8_4_1_8,
"""num_examples""": 1_0_0_0,
},
] , download_size=3_9_4_0_6_8_0 , dataset_size=2_5_8_9_9_8_1 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result , expected = getattr(dataset_infos["""default"""] , key ), getattr(expected_dataset_infos["""default"""] , key )
        if key == "num_bytes":
            assert is_apercent_close(result , expected )
        elif key == "splits":
            assert list(result ) == list(expected )
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
        else:
            assert result == expected
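# Hedged note: this is a pytest integration test over a dataset-directory fixture; a
# typical invocation (test path illustrative) is
#
#   python -m pytest -m integration tests/commands/test_test.py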
| 163 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""],
"""tokenization_luke""": ["""LukeTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_luke"""] = [
"""LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LukeForEntityClassification""",
"""LukeForEntityPairClassification""",
"""LukeForEntitySpanClassification""",
"""LukeForMultipleChoice""",
"""LukeForQuestionAnswering""",
"""LukeForSequenceClassification""",
"""LukeForTokenClassification""",
"""LukeForMaskedLM""",
"""LukeModel""",
"""LukePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 352 |
"""simple docstring"""
def sum_of_divisors ( input_num : int ) -> int:
    """simple docstring"""
    if not isinstance(input_num , int ):
raise ValueError("Input must be an integer" )
if input_num <= 0:
raise ValueError("Input must be positive" )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
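# Illustrative checks for the divisor sum above (a number is perfect when the sum of
# its proper divisors equals the number itself):
#
#   sum_of_divisors(6)   -> 6    (1 + 2 + 3)
#   sum_of_divisors(28)  -> 28   (1 + 2 + 4 + 7 + 14)
#   sum_of_divisors(10)  -> 8    (1 + 2 + 5)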
| 112 | 0 |